Java class org.apache.hadoop.hbase.util.IdLock example source code

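All of the examples below follow the same acquire/release idiom around org.apache.hadoop.hbase.util.IdLock: obtain an IdLock.Entry for a numeric id (a file-name hash code, a table-name hash code, or a block offset), do the per-id work, and release the entry in a finally block. The following is a minimal, self-contained sketch of that idiom; the class name IdLockExample and the method doWorkFor are illustrative only and do not appear in the projects quoted below.

import java.io.IOException;

import org.apache.hadoop.hbase.util.IdLock;

public class IdLockExample {
  // One IdLock instance guards all ids; distinct ids can be held concurrently.
  private final IdLock keyLock = new IdLock();

  public void doWorkFor(String key) throws IOException {
    IdLock.Entry lockEntry = null;
    try {
      // Blocks until no other thread holds the entry for this id.
      lockEntry = keyLock.getLockEntry(key.hashCode());
      // ... per-key critical section ...
    } finally {
      // Always release in finally, exactly as the snippets below do.
      if (lockEntry != null) {
        keyLock.releaseLockEntry(lockEntry);
      }
    }
  }
}

Because each id maps to its own lock entry, threads working on different files, tables, or offsets proceed in parallel, while threads contending on the same id serialize. Note that the examples key on hashCode(), so two distinct keys that collide will share a lock; that is harmless for correctness but serializes their access.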
Project: hbase    File: MobFileCache.java
/**
 * Evicts the cached file by the name.
 * @param fileName The name of a cached file.
 */
public void evictFile(String fileName) {
  if (isCacheEnabled) {
    IdLock.Entry lockEntry = null;
    try {
      // obtains the lock to close the cached file.
      lockEntry = keyLock.getLockEntry(fileName.hashCode());
      CachedMobFile evictedFile = map.remove(fileName);
      if (evictedFile != null) {
        evictedFile.close();
        evictedFileCount.increment();
      }
    } catch (IOException e) {
      LOG.error("Failed to evict the file " + fileName, e);
    } finally {
      if (lockEntry != null) {
        keyLock.releaseLockEntry(lockEntry);
      }
    }
  }
}
Project: hbase    File: MobFileCache.java
/**
 * Closes a mob file.
 * @param file The mob file that needs to be closed.
 */
public void closeFile(MobFile file) {
  IdLock.Entry lockEntry = null;
  try {
    if (!isCacheEnabled) {
      file.close();
    } else {
      lockEntry = keyLock.getLockEntry(file.getFileName().hashCode());
      file.close();
    }
  } catch (IOException e) {
    LOG.error("MobFileCache, Exception happen during close " + file.getFileName(), e);
  } finally {
    if (lockEntry != null) {
      keyLock.releaseLockEntry(lockEntry);
    }
  }
}
Project: hbase    File: HMaster.java
public void reportMobCompactionStart(TableName tableName) throws IOException {
  IdLock.Entry lockEntry = null;
  try {
    lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
    AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
    if (compactionsCount == null) {
      compactionsCount = new AtomicInteger(0);
      mobCompactionStates.put(tableName, compactionsCount);
    }
    compactionsCount.incrementAndGet();
  } finally {
    if (lockEntry != null) {
      mobCompactionLock.releaseLockEntry(lockEntry);
    }
  }
}
Project: hbase    File: HMaster.java
public void reportMobCompactionEnd(TableName tableName) throws IOException {
  IdLock.Entry lockEntry = null;
  try {
    lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
    AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
    if (compactionsCount != null) {
      int count = compactionsCount.decrementAndGet();
      // remove the entry if the count is 0.
      if (count == 0) {
        mobCompactionStates.remove(tableName);
      }
    }
  } finally {
    if (lockEntry != null) {
      mobCompactionLock.releaseLockEntry(lockEntry);
    }
  }
}
Project: HBase-LOB    File: MobFileCache.java
/**
 * Evicts the cached file by the name.
 * @param fileName The name of a cached file.
 */
public void evictFile(String fileName) {
  if (isCacheEnabled) {
    IdLock.Entry lockEntry = null;
    try {
      // obtains the lock to close the cached file.
      lockEntry = keyLock.getLockEntry(fileName.hashCode());
      CachedMobFile evictedFile = map.remove(fileName);
      if (evictedFile != null) {
        evictedFile.close();
      }
    } catch (IOException e) {
      LOG.error("Fail to evict the file " + fileName, e);
    } finally {
      if (lockEntry != null) {
        keyLock.releaseLockEntry(lockEntry);
      }
    }
  }
}
Project: HBase-LOB    File: MobFileCache.java
/**
 * Closes a mob file.
 * @param file The mob file that needs to be closed.
 */
public void closeFile(MobFile file) {
  IdLock.Entry lockEntry = null;
  try {
    if (!isCacheEnabled) {
      file.close();
    } else {
      lockEntry = keyLock.getLockEntry(file.getFileName().hashCode());
      file.close();
    }
  } catch (IOException e) {
    LOG.error("MobFileCache, Exception happen during close " + file.getFileName(), e);
  } finally {
    if (lockEntry != null) {
      keyLock.releaseLockEntry(lockEntry);
    }
  }
}
Project: pbase    File: BucketCache.java
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
  if (!cacheEnabled) return false;
  RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
  if (removedBlock != null) {
    this.blockNumber.decrementAndGet();
    this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize());
  }
  BucketEntry bucketEntry = backingMap.get(cacheKey);
  if (bucketEntry != null) {
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.remove(cacheKey))) {
        bucketAllocator.freeBlock(bucketEntry.offset());
        realCacheSize.addAndGet(-1 * bucketEntry.getLength());
        blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
        if (removedBlock == null) {
          this.blockNumber.decrementAndGet();
        }
      } else {
        return false;
      }
    } catch (IOException ie) {
      LOG.warn("Failed evicting block " + cacheKey);
      return false;
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  cacheStats.evicted(bucketEntry == null? 0: bucketEntry.getCachedTime());
  return true;
}
Project: HIndex    File: BucketCache.java
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
  if (!cacheEnabled) return false;
  RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
  if (removedBlock != null) {
    this.blockNumber.decrementAndGet();
    this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize());
  }
  BucketEntry bucketEntry = backingMap.get(cacheKey);
  if (bucketEntry != null) {
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.remove(cacheKey))) {
        bucketAllocator.freeBlock(bucketEntry.offset());
        realCacheSize.addAndGet(-1 * bucketEntry.getLength());
        blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
        if (removedBlock == null) {
          this.blockNumber.decrementAndGet();
        }
      } else {
        return false;
      }
    } catch (IOException ie) {
      LOG.warn("Failed evicting block " + cacheKey);
      return false;
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  cacheStats.evicted();
  return true;
}
Project: hbase    File: MobFileCache.java
/**
 * Opens a mob file.
 * @param fs The current file system.
 * @param path The file path.
 * @param cacheConf The current MobCacheConfig
 * @return An opened mob file.
 * @throws IOException
 */
public MobFile openFile(FileSystem fs, Path path, MobCacheConfig cacheConf) throws IOException {
  if (!isCacheEnabled) {
    MobFile mobFile = MobFile.create(fs, path, conf, cacheConf);
    mobFile.open();
    return mobFile;
  } else {
    String fileName = path.getName();
    CachedMobFile cached = map.get(fileName);
    IdLock.Entry lockEntry = keyLock.getLockEntry(fileName.hashCode());
    try {
      if (cached == null) {
        cached = map.get(fileName);
        if (cached == null) {
          if (map.size() > mobFileMaxCacheSize) {
            evict();
          }
          cached = CachedMobFile.create(fs, path, conf, cacheConf);
          cached.open();
          map.put(fileName, cached);
          miss.increment();
        }
      }
      cached.open();
      cached.access(count.incrementAndGet());
    } finally {
      keyLock.releaseLockEntry(lockEntry);
    }
    return cached;
  }
}
Project: PyroDB    File: BucketCache.java
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
  if (!cacheEnabled) return false;
  RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
  if (removedBlock != null) {
    this.blockNumber.decrementAndGet();
    this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize());
  }
  BucketEntry bucketEntry = backingMap.get(cacheKey);
  if (bucketEntry != null) {
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.remove(cacheKey))) {
        bucketAllocator.freeBlock(bucketEntry.offset());
        realCacheSize.addAndGet(-1 * bucketEntry.getLength());
        blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
        if (removedBlock == null) {
          this.blockNumber.decrementAndGet();
        }
      } else {
        return false;
      }
    } catch (IOException ie) {
      LOG.warn("Failed evicting block " + cacheKey);
      return false;
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  cacheStats.evicted();
  return true;
}
Project: c5    File: BucketCache.java
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
  if (!cacheEnabled) return false;
  RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
  if (removedBlock != null) {
    this.blockNumber.decrementAndGet();
    this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize());
  }
  BucketEntry bucketEntry = backingMap.get(cacheKey);
  if (bucketEntry != null) {
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.remove(cacheKey))) {
        bucketAllocator.freeBlock(bucketEntry.offset());
        realCacheSize.addAndGet(-1 * bucketEntry.getLength());
        blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
        if (removedBlock == null) {
          this.blockNumber.decrementAndGet();
        }
      } else {
        return false;
      }
    } catch (IOException ie) {
      LOG.warn("Failed evicting block " + cacheKey);
      return false;
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  cacheStats.evicted();
  return true;
}
Project: HBase-LOB    File: MobFileCache.java
/**
 * Opens a mob file.
 * @param fs The current file system.
 * @param path The file path.
 * @param cacheConf The current MobCacheConfig
 * @return An opened mob file.
 * @throws IOException
 */
public MobFile openFile(FileSystem fs, Path path, MobCacheConfig cacheConf) throws IOException {
  if (!isCacheEnabled) {
    return MobFile.create(fs, path, conf, cacheConf);
  } else {
    String fileName = path.getName();
    CachedMobFile cached = map.get(fileName);
    IdLock.Entry lockEntry = keyLock.getLockEntry(fileName.hashCode());
    try {
      if (cached == null) {
        cached = map.get(fileName);
        if (cached == null) {
          if (map.size() > mobFileMaxCacheSize) {
            evict();
          }
          cached = CachedMobFile.create(fs, path, conf, cacheConf);
          cached.open();
          map.put(fileName, cached);
          miss.incrementAndGet();
        }
      }
      cached.open();
      cached.access(count.incrementAndGet());
    } finally {
      keyLock.releaseLockEntry(lockEntry);
    }
    return cached;
  }
}
Project: DominoHBase    File: BucketCache.java
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
  if (!cacheEnabled) return false;
  RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
  if (removedBlock != null) {
    this.blockNumber.decrementAndGet();
    this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize());
  }
  BucketEntry bucketEntry = backingMap.get(cacheKey);
  if (bucketEntry != null) {
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.remove(cacheKey))) {
        bucketAllocator.freeBlock(bucketEntry.offset());
        realCacheSize.addAndGet(-1 * bucketEntry.getLength());
        if (removedBlock == null) {
          this.blockNumber.decrementAndGet();
        }
      } else {
        return false;
      }
    } catch (IOException ie) {
      LOG.warn("Failed evicting block " + cacheKey);
      return false;
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  cacheStats.evicted();
  return true;
}
Project: pbase    File: BucketCache.java
/**
 * Get the buffer of the block with the specified key.
 * @param key block's cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat Whether this is a repeat lookup for the same block
 * @param updateCacheMetrics Whether we should update cache metrics or not
 * @return buffer of specified cache key, or null if not in cache
 */
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
    boolean updateCacheMetrics) {
  if (!cacheEnabled)
    return null;
  RAMQueueEntry re = ramCache.get(key);
  if (re != null) {
    if (updateCacheMetrics) cacheStats.hit(caching);
    re.access(accessCount.incrementAndGet());
    return re.getData();
  }
  BucketEntry bucketEntry = backingMap.get(key);
  if (bucketEntry != null) {
    long start = System.nanoTime();
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.get(key))) {
        int len = bucketEntry.getLength();
        ByteBuffer bb = ByteBuffer.allocate(len);
        int lenRead = ioEngine.read(bb, bucketEntry.offset());
        if (lenRead != len) {
          throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
        }
        CacheableDeserializer<Cacheable> deserializer =
          bucketEntry.deserializerReference(this.deserialiserMap);
        Cacheable cachedBlock = deserializer.deserialize(bb, true);
        long timeTaken = System.nanoTime() - start;
        if (updateCacheMetrics) {
          cacheStats.hit(caching);
          cacheStats.ioHit(timeTaken);
        }
        bucketEntry.access(accessCount.incrementAndGet());
        if (this.ioErrorStartTime > 0) {
          ioErrorStartTime = -1;
        }
        return cachedBlock;
      }
    } catch (IOException ioex) {
      LOG.error("Failed reading block " + key + " from bucket cache", ioex);
      checkIOErrorIsTolerated();
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  if (!repeat && updateCacheMetrics) cacheStats.miss(caching);
  return null;
}
Project: HIndex    File: BucketCache.java
/**
 * Get the buffer of the block with the specified key.
 * @param key block's cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat Whether this is a repeat lookup for the same block
 * @param updateCacheMetrics Whether we should update cache metrics or not
 * @return buffer of specified cache key, or null if not in cache
 */
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
    boolean updateCacheMetrics) {
  if (!cacheEnabled)
    return null;
  RAMQueueEntry re = ramCache.get(key);
  if (re != null) {
    if (updateCacheMetrics) cacheStats.hit(caching);
    re.access(accessCount.incrementAndGet());
    return re.getData();
  }
  BucketEntry bucketEntry = backingMap.get(key);
  if(bucketEntry!=null) {
    long start = System.nanoTime();
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.get(key))) {
        int len = bucketEntry.getLength();
        ByteBuffer bb = ByteBuffer.allocate(len);
        int lenRead = ioEngine.read(bb, bucketEntry.offset());
        if (lenRead != len) {
          throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
        }
        Cacheable cachedBlock = bucketEntry.deserializerReference(
            deserialiserMap).deserialize(bb, true);
        long timeTaken = System.nanoTime() - start;
        if (updateCacheMetrics) {
          cacheStats.hit(caching);
          cacheStats.ioHit(timeTaken);
        }
        bucketEntry.access(accessCount.incrementAndGet());
        if (this.ioErrorStartTime > 0) {
          ioErrorStartTime = -1;
        }
        return cachedBlock;
      }
    } catch (IOException ioex) {
      LOG.error("Failed reading block " + key + " from bucket cache", ioex);
      checkIOErrorIsTolerated();
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  if (!repeat && updateCacheMetrics) cacheStats.miss(caching);
  return null;
}
Project: hbase    File: HMobStore.java
/**
 * Reads the cell from the mob file.
 * @param reference The cell found in HBase; its value is a path to a mob file.
 * @param cacheBlocks Whether the scanner should cache blocks.
 * @param readPt the read point.
 * @param readEmptyValueOnMobCellMiss Whether to return a null value when the mob file is
 *        missing or corrupt.
 * @return The cell found in the mob file.
 * @throws IOException
 */
public Cell resolve(Cell reference, boolean cacheBlocks, long readPt,
  boolean readEmptyValueOnMobCellMiss) throws IOException {
  Cell result = null;
  if (MobUtils.hasValidMobRefCellValue(reference)) {
    String fileName = MobUtils.getMobFileName(reference);
    Tag tableNameTag = MobUtils.getTableNameTag(reference);
    if (tableNameTag != null) {
      String tableNameString = Tag.getValueAsString(tableNameTag);
      List<Path> locations = map.get(tableNameString);
      if (locations == null) {
        IdLock.Entry lockEntry = keyLock.getLockEntry(tableNameString.hashCode());
        try {
          locations = map.get(tableNameString);
          if (locations == null) {
            locations = new ArrayList<>(2);
            TableName tn = TableName.valueOf(tableNameString);
            locations.add(MobUtils.getMobFamilyPath(conf, tn, family.getNameAsString()));
            locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils
                .getMobRegionInfo(tn).getEncodedName(), family.getNameAsString()));
            map.put(tableNameString, locations);
          }
        } finally {
          keyLock.releaseLockEntry(lockEntry);
        }
      }
      result = readCell(locations, fileName, reference, cacheBlocks, readPt,
        readEmptyValueOnMobCellMiss);
    }
  }
  if (result == null) {
    LOG.warn("The Cell result is null, assemble a new Cell with the same row,family,"
        + "qualifier,timestamp,type and tags but with an empty value to return.");
    result = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
            .setRow(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength())
            .setFamily(reference.getFamilyArray(), reference.getFamilyOffset(),
              reference.getFamilyLength())
            .setQualifier(reference.getQualifierArray(),
              reference.getQualifierOffset(), reference.getQualifierLength())
            .setTimestamp(reference.getTimestamp())
            .setType(reference.getTypeByte())
            .setValue(HConstants.EMPTY_BYTE_ARRAY)
            .setTags(reference.getTagsArray(), reference.getTagsOffset(),
              reference.getTagsLength())
            .build();
  }
  return result;
}
Project: RStore    File: HFileReaderV2.java
/**
 * Read in a file block.
 *
 * @param dataBlockOffset offset to read.
 * @param onDiskBlockSize size of the block
 * @param cacheBlock
 * @param pread Use positional read instead of seek+read (positional is better
 *          for random reads whereas seek+read is better for scanning).
 * @param isCompaction is this block being read as part of a compaction
 * @return Block wrapped in a ByteBuffer.
 * @throws IOException
 */
@Override
public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize,
    final boolean cacheBlock, boolean pread, final boolean isCompaction)
    throws IOException {
  if (dataBlockIndexReader == null) {
    throw new IOException("Block index not loaded");
  }
  if (dataBlockOffset < 0
      || dataBlockOffset >= trailer.getLoadOnOpenDataOffset()) {
    throw new IOException("Requested block is out of range: "
        + dataBlockOffset + ", lastDataBlockOffset: "
        + trailer.getLastDataBlockOffset());
  }
  // For any given block from any given file, synchronize reads for said
  // block.
  // Without a cache, this synchronizing is needless overhead, but really
  // the other choice is to duplicate work (which the cache would prevent you
  // from doing).

  BlockCacheKey cacheKey = HFile.getBlockCacheKey(name, dataBlockOffset);
  IdLock.Entry lockEntry = offsetLock.getLockEntry(dataBlockOffset);
  try {
    blockLoads.incrementAndGet();

    if (cacheConf.isBlockCacheEnabled()) {
      HFileBlock cachedBlock =
        (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
      if (cachedBlock != null) {
        cacheHits.incrementAndGet();

        return cachedBlock;
      }
      // Carry on, please load.
    }

    // Load block from filesystem.
    long startTimeNs = System.nanoTime();
    HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
        onDiskBlockSize, -1, pread);
    BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();

    HFile.readTimeNano.addAndGet(System.nanoTime() - startTimeNs);
    HFile.readOps.incrementAndGet();

    // Cache the block
    if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
      hfileBlock.getBlockType().getCategory())) {
      cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
          cacheConf.isInMemory());
    }

    return hfileBlock;
  } finally {
    offsetLock.releaseLockEntry(lockEntry);
  }
}
Project: PyroDB    File: BucketCache.java
/**
 * Get the buffer of the block with the specified key.
 * @param key block's cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat Whether this is a repeat lookup for the same block
 * @param updateCacheMetrics Whether we should update cache metrics or not
 * @return buffer of specified cache key, or null if not in cache
 */
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
    boolean updateCacheMetrics) {
  if (!cacheEnabled)
    return null;
  RAMQueueEntry re = ramCache.get(key);
  if (re != null) {
    if (updateCacheMetrics) cacheStats.hit(caching);
    re.access(accessCount.incrementAndGet());
    return re.getData();
  }
  BucketEntry bucketEntry = backingMap.get(key);
  if (bucketEntry != null) {
    long start = System.nanoTime();
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.get(key))) {
        int len = bucketEntry.getLength();
        ByteBuffer bb = ByteBuffer.allocate(len);
        int lenRead = ioEngine.read(bb, bucketEntry.offset());
        if (lenRead != len) {
          throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
        }
        Cacheable cachedBlock = bucketEntry.deserializerReference(
            deserialiserMap).deserialize(bb, true);
        long timeTaken = System.nanoTime() - start;
        if (updateCacheMetrics) {
          cacheStats.hit(caching);
          cacheStats.ioHit(timeTaken);
        }
        bucketEntry.access(accessCount.incrementAndGet());
        if (this.ioErrorStartTime > 0) {
          ioErrorStartTime = -1;
        }
        return cachedBlock;
      }
    } catch (IOException ioex) {
      LOG.error("Failed reading block " + key + " from bucket cache", ioex);
      checkIOErrorIsTolerated();
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  if (!repeat && updateCacheMetrics) cacheStats.miss(caching);
  return null;
}
Project: c5    File: BucketCache.java
/**
 * Get the buffer of the block with the specified key.
 * @param key block's cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat Whether this is a repeat lookup for the same block
 * @return buffer of specified cache key, or null if not in cache
 */
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat) {
  if (!cacheEnabled)
    return null;
  RAMQueueEntry re = ramCache.get(key);
  if (re != null) {
    cacheStats.hit(caching);
    re.access(accessCount.incrementAndGet());
    return re.getData();
  }
  BucketEntry bucketEntry = backingMap.get(key);
  if(bucketEntry!=null) {
    long start = System.nanoTime();
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.get(key))) {
        int len = bucketEntry.getLength();
        ByteBuffer bb = ByteBuffer.allocate(len);
        ioEngine.read(bb, bucketEntry.offset());
        Cacheable cachedBlock = bucketEntry.deserializerReference(
            deserialiserMap).deserialize(bb, true);
        long timeTaken = System.nanoTime() - start;
        cacheStats.hit(caching);
        cacheStats.ioHit(timeTaken);
        bucketEntry.access(accessCount.incrementAndGet());
        if (this.ioErrorStartTime > 0) {
          ioErrorStartTime = -1;
        }
        return cachedBlock;
      }
    } catch (IOException ioex) {
      LOG.error("Failed reading block " + key + " from bucket cache", ioex);
      checkIOErrorIsTolerated();
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  if(!repeat)cacheStats.miss(caching);
  return null;
}
Project: DominoHBase    File: BucketCache.java
/**
 * Get the buffer of the block with the specified key.
 * @param key block's cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat Whether this is a repeat lookup for the same block
 * @return buffer of specified cache key, or null if not in cache
 */
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat) {
  if (!cacheEnabled)
    return null;
  RAMQueueEntry re = ramCache.get(key);
  if (re != null) {
    cacheStats.hit(caching);
    re.access(accessCount.incrementAndGet());
    return re.getData();
  }
  BucketEntry bucketEntry = backingMap.get(key);
  if(bucketEntry!=null) {
    long start = System.nanoTime();
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.get(key))) {
        int len = bucketEntry.getLength();
        ByteBuffer bb = ByteBuffer.allocate(len);
        ioEngine.read(bb, bucketEntry.offset());
        Cacheable cachedBlock = bucketEntry.deserializerReference(
            deserialiserMap).deserialize(bb, true);
        long timeTaken = System.nanoTime() - start;
        cacheStats.hit(caching);
        cacheStats.ioHit(timeTaken);
        bucketEntry.access(accessCount.incrementAndGet());
        if (this.ioErrorStartTime > 0) {
          ioErrorStartTime = -1;
        }
        return cachedBlock;
      }
    } catch (IOException ioex) {
      LOG.error("Failed reading block " + key + " from bucket cache", ioex);
      checkIOErrorIsTolerated();
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  if(!repeat)cacheStats.miss(caching);
  return null;
}