Java class org.apache.hadoop.util.LightWeightGSet — example usage source code

项目:hadoop    文件:BlocksMap.java   
BlocksMap(int capacity) {
  // Capacity is pre-computed by the caller (per the original note, sized at
  // ~2% of total heap); this constructor only records it and builds the GSet.
  this.capacity = capacity;
  // Anonymous subclass so that iteration does not track modifications.
  this.blocks = new LightWeightGSet<Block, BlockInfoContiguous>(capacity) {
    @Override
    public Iterator<BlockInfoContiguous> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
项目:aliyun-oss-hadoop-fs    文件:BlocksMap.java   
BlocksMap(int capacity) {
  // Capacity is pre-computed by the caller (per the original note, sized at
  // ~2% of total heap); this constructor only records it and builds the GSet.
  this.capacity = capacity;
  // Anonymous subclass so that iteration does not track modifications.
  this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity) {
    @Override
    public Iterator<BlockInfo> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
项目:big-c    文件:BlocksMap.java   
BlocksMap(int capacity) {
  // Capacity is pre-computed by the caller (per the original note, sized at
  // ~2% of total heap); this constructor only records it and builds the GSet.
  this.capacity = capacity;
  // Anonymous subclass so that iteration does not track modifications.
  this.blocks = new LightWeightGSet<Block, BlockInfoContiguous>(capacity) {
    @Override
    public Iterator<BlockInfoContiguous> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
项目:hadoop-2.6.0-cdh5.4.3    文件:BlocksMap.java   
BlocksMap(int capacity) {
  // Capacity is pre-computed by the caller (per the original note, sized at
  // ~2% of total heap); this constructor only records it and builds the GSet.
  this.capacity = capacity;
  // Anonymous subclass so that iteration does not track modifications.
  this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity) {
    @Override
    public Iterator<BlockInfo> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
项目:FlexMap    文件:BlocksMap.java   
BlocksMap(int capacity) {
  // Capacity is pre-computed by the caller (per the original note, sized at
  // ~2% of total heap); this constructor only records it and builds the GSet.
  this.capacity = capacity;
  // Anonymous subclass so that iteration does not track modifications.
  this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity) {
    @Override
    public Iterator<BlockInfo> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
项目:hadoop-on-lustre2    文件:BlocksMap.java   
BlocksMap(int capacity) {
  // Capacity is pre-computed by the caller (per the original note, sized at
  // ~2% of total heap); this constructor only records it and builds the GSet.
  this.capacity = capacity;
  // Anonymous subclass so that iteration does not track modifications.
  this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity) {
    @Override
    public Iterator<BlockInfo> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
项目:hadoop-oss    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // NOTE(review): despite its name, MAX_CAPACITY acts as a FLOOR here — the
  // cache never starts smaller than MAX_CAPACITY entries. Math.max makes
  // that intent explicit (the original ternary read like a cap).
  capacity = Math.max(capacity, MAX_CAPACITY);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics = RetryCacheMetrics.create(this);
}
项目:hadoop    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<INode, INodeWithAdditionalFields>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:hadoop    文件:CacheManager.java   
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  // Wire in collaborators and seed the directive-id counter.
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  // Rescan interval and listing limits come straight from configuration.
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  // Clamp the cached-block map heap allocation to its configured minimum.
  float blockMapPercent = conf.getFloat(
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (blockMapPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    blockMapPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  final int blockMapCapacity =
      LightWeightGSet.computeCapacity(blockMapPercent, "cachedBlocks");
  this.cachedBlocks =
      new LightWeightGSet<CachedBlock, CachedBlock>(blockMapCapacity);
}
项目:hadoop    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // Enforce a floor of 16 entries; Math.max replaces the equivalent but
  // harder-to-read "capacity > 16 ? capacity : 16" ternary.
  capacity = Math.max(capacity, 16);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics = RetryCacheMetrics.create(this);
}
项目:aliyun-oss-hadoop-fs    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:aliyun-oss-hadoop-fs    文件:CacheManager.java   
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  // Wire in collaborators and seed the directive-id counter.
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  // Rescan interval and listing limits come straight from configuration.
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  // Clamp the cached-block map heap allocation to its configured minimum.
  float blockMapPercent = conf.getFloat(
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (blockMapPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    blockMapPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  final int blockMapCapacity =
      LightWeightGSet.computeCapacity(blockMapPercent, "cachedBlocks");
  this.cachedBlocks =
      new LightWeightGSet<CachedBlock, CachedBlock>(blockMapCapacity);
}
项目:aliyun-oss-hadoop-fs    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // NOTE(review): despite its name, MAX_CAPACITY acts as a FLOOR here — the
  // cache never starts smaller than MAX_CAPACITY entries. Math.max makes
  // that intent explicit (the original ternary read like a cap).
  capacity = Math.max(capacity, MAX_CAPACITY);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics = RetryCacheMetrics.create(this);
}
项目:big-c    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<INode, INodeWithAdditionalFields>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:big-c    文件:CacheManager.java   
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  // Wire in collaborators and seed the directive-id counter.
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  // Rescan interval and listing limits come straight from configuration.
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  // Clamp the cached-block map heap allocation to its configured minimum.
  float blockMapPercent = conf.getFloat(
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (blockMapPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    blockMapPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  final int blockMapCapacity =
      LightWeightGSet.computeCapacity(blockMapPercent, "cachedBlocks");
  this.cachedBlocks =
      new LightWeightGSet<CachedBlock, CachedBlock>(blockMapCapacity);
}
项目:big-c    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // Enforce a floor of 16 entries; Math.max replaces the equivalent but
  // harder-to-read "capacity > 16 ? capacity : 16" ternary.
  capacity = Math.max(capacity, 16);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics = RetryCacheMetrics.create(this);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<INode, INodeWithAdditionalFields>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:CacheManager.java   
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  // Wire in collaborators and seed the directive-id counter.
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  // Rescan interval and listing limits come straight from configuration.
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  // Clamp the cached-block map heap allocation to its configured minimum.
  float blockMapPercent = conf.getFloat(
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (blockMapPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    blockMapPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  final int blockMapCapacity =
      LightWeightGSet.computeCapacity(blockMapPercent, "cachedBlocks");
  this.cachedBlocks =
      new LightWeightGSet<CachedBlock, CachedBlock>(blockMapCapacity);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // Enforce a floor of 16 entries; Math.max replaces the equivalent but
  // harder-to-read "capacity > 16 ? capacity : 16" ternary.
  capacity = Math.max(capacity, 16);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics = RetryCacheMetrics.create(this);
}
项目:hadoop-plus    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<INode, INodeWithAdditionalFields>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:hadoop-plus    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // Enforce a floor of 16 entries; Math.max replaces the equivalent but
  // harder-to-read "capacity > 16 ? capacity : 16" ternary.
  capacity = Math.max(capacity, 16);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
}
项目:FlexMap    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<INode, INodeWithAdditionalFields>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:FlexMap    文件:CacheManager.java   
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  // Wire in collaborators and seed the directive-id counter.
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  // Rescan interval and listing limits come straight from configuration.
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  // Clamp the cached-block map heap allocation to its configured minimum.
  float blockMapPercent = conf.getFloat(
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (blockMapPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    blockMapPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  final int blockMapCapacity =
      LightWeightGSet.computeCapacity(blockMapPercent, "cachedBlocks");
  this.cachedBlocks =
      new LightWeightGSet<CachedBlock, CachedBlock>(blockMapCapacity);
}
项目:hops    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // NOTE(review): despite its name, MAX_CAPACITY acts as a FLOOR here — the
  // cache never starts smaller than MAX_CAPACITY entries. Math.max makes
  // that intent explicit (the original ternary read like a cap).
  capacity = Math.max(capacity, MAX_CAPACITY);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics = RetryCacheMetrics.create(this);
}
项目:hadoop-TCP    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<INode, INodeWithAdditionalFields>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:hadoop-TCP    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // Enforce a floor of 16 entries; Math.max replaces the equivalent but
  // harder-to-read "capacity > 16 ? capacity : 16" ternary.
  capacity = Math.max(capacity, 16);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
}
项目:hardfs    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<INode, INodeWithAdditionalFields>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:hardfs    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // Enforce a floor of 16 entries; Math.max replaces the equivalent but
  // harder-to-read "capacity > 16 ? capacity : 16" ternary.
  capacity = Math.max(capacity, 16);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
}
项目:hadoop-on-lustre2    文件:INodeMap.java   
static INodeMap newInstance(INodeDirectory rootDir) {
  // Size the backing GSet at 1% of total heap, seed it with the FS root
  // directory, then wrap it in an INodeMap.
  final int mapCapacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  final GSet<INode, INodeWithAdditionalFields> inodes =
      new LightWeightGSet<INode, INodeWithAdditionalFields>(mapCapacity);
  inodes.put(rootDir);
  return new INodeMap(inodes);
}
项目:hadoop-on-lustre2    文件:CacheManager.java   
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  // Wire in collaborators and seed the directive-id counter.
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  // Rescan interval and listing limits come straight from configuration.
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  // Clamp the cached-block map heap allocation to its configured minimum.
  float blockMapPercent = conf.getFloat(
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (blockMapPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value " + MIN_CACHED_BLOCKS_PERCENT +
      " for " + DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    blockMapPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  final int blockMapCapacity =
      LightWeightGSet.computeCapacity(blockMapPercent, "cachedBlocks");
  this.cachedBlocks =
      new LightWeightGSet<CachedBlock, CachedBlock>(blockMapCapacity);
}
项目:hadoop-on-lustre2    文件:RetryCache.java   
/**
 * Constructor.
 *
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  // Enforce a floor of 16 entries; Math.max replaces the equivalent but
  // harder-to-read "capacity > 16 ? capacity : 16" ternary.
  capacity = Math.max(capacity, 16);
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics = RetryCacheMetrics.create(this);
}
项目:hadoop-oss    文件:RetryCache.java   
/** Returns the backing cache set; exposed for tests only. */
@VisibleForTesting
public LightWeightGSet<CacheEntry, CacheEntry> getCacheSet() {
  return set;
}
项目:hadoop    文件:BlockInfoContiguous.java   
/**
 * LinkedElement accessor: returns the element chained after this one
 * (presumably within a LightWeightGSet hash bucket — confirm against
 * LightWeightGSet.LinkedElement's contract).
 */
@Override
public LightWeightGSet.LinkedElement getNext() {
  return nextLinkedElement;
}
项目:hadoop    文件:BlockInfoContiguous.java   
/**
 * LinkedElement mutator: records the element chained after this one
 * (presumably called by LightWeightGSet when linking hash buckets —
 * confirm against LightWeightGSet.LinkedElement's contract).
 */
@Override
public void setNext(LightWeightGSet.LinkedElement next) {
  this.nextLinkedElement = next;
}
项目:hadoop    文件:RetryCache.java   
/** Returns the backing cache set; exposed for tests only. */
@VisibleForTesting
public LightWeightGSet<CacheEntry, CacheEntry> getCacheSet() {
  return set;
}
项目:aliyun-oss-hadoop-fs    文件:BlockInfo.java   
/**
 * LinkedElement accessor: returns the element chained after this one
 * (presumably within a LightWeightGSet hash bucket — confirm against
 * LightWeightGSet.LinkedElement's contract).
 */
@Override
public LightWeightGSet.LinkedElement getNext() {
  return nextLinkedElement;
}
项目:aliyun-oss-hadoop-fs    文件:BlockInfo.java   
/**
 * LinkedElement mutator: records the element chained after this one
 * (presumably called by LightWeightGSet when linking hash buckets —
 * confirm against LightWeightGSet.LinkedElement's contract).
 */
@Override
public void setNext(LightWeightGSet.LinkedElement next) {
  this.nextLinkedElement = next;
}
项目:aliyun-oss-hadoop-fs    文件:RetryCache.java   
/** Returns the backing cache set; exposed for tests only. */
@VisibleForTesting
public LightWeightGSet<CacheEntry, CacheEntry> getCacheSet() {
  return set;
}
项目:big-c    文件:BlockInfoContiguous.java   
/**
 * LinkedElement accessor: returns the element chained after this one
 * (presumably within a LightWeightGSet hash bucket — confirm against
 * LightWeightGSet.LinkedElement's contract).
 */
@Override
public LightWeightGSet.LinkedElement getNext() {
  return nextLinkedElement;
}
项目:big-c    文件:BlockInfoContiguous.java   
/**
 * LinkedElement mutator: records the element chained after this one
 * (presumably called by LightWeightGSet when linking hash buckets —
 * confirm against LightWeightGSet.LinkedElement's contract).
 */
@Override
public void setNext(LightWeightGSet.LinkedElement next) {
  this.nextLinkedElement = next;
}