Java 类org.apache.hadoop.hbase.regionserver.compactions.CompactSelection 实例源码

项目:LCIndex-HBase-0.94.16    文件:CompactJobQueue.java   
/**
 * Runs a minor compaction over every LCIndex qualifier directory collected in
 * {@code allIdxPaths}: for each (target dir -> index file list) entry it opens the
 * index store files, merges their stat files, compacts the index files into a single
 * writer under {@code destLCIndexDir}, and closes the readers again.
 * <p>
 * Every qualifier must be merged separately: B1-Q1, B2-Q1, B3-Q1 are merged into
 * B-target-Q1.
 * @throws IOException if opening, stat-merging or compacting any of the files fails
 */
private void doMinorCompact() throws IOException {
  // hfilePath = lcc/AAA/.tmp/BBB/
  // destLCIndexDir = lcc/AAA/.lctmp/BBB.lccindex
  for (Entry<Path, List<Path>> entry : allIdxPaths.entrySet()) {
    List<StoreFile> lccSFList = new ArrayList<StoreFile>();
    List<Path> statPathList = new ArrayList<Path>();
    try {
      for (Path lcIdxPath : entry.getValue()) {
        StoreFile lccSF =
            new StoreFile(store.localfs, lcIdxPath, store.conf, store.cacheConf, store
                .getFamily().getBloomFilterType(), store.getDataBlockEncoder());
        lccSF.createReader();
        lccSFList.add(lccSF);
        // sibling stat file lives next to the index file, with the stat suffix appended
        statPathList.add(new Path(lcIdxPath.getParent(), lcIdxPath.getName()
            + LCCIndexConstant.LC_STAT_FILE_SUFFIX));
      }
      CompactSelection lccIndexFilesToCompactCS = new CompactSelection(store.conf, lccSFList);
      CompactionRequest lccCR =
          new CompactionRequest(request.getHRegion(), store, lccIndexFilesToCompactCS,
              request.isMajor(), request.getPriority());
      long maxId = StoreFile.getMaxSequenceIdInList(lccSFList, true);
      Path destPath = new Path(destLCIndexDir, entry.getKey().getName());
      // compact the stat file first, then the store files themselves
      store.compactor.mWinterCompactStatFile(store.localfs, statPathList, new Path(
          destLCIndexDir, entry.getKey().getName() + LCCIndexConstant.LC_STAT_FILE_SUFFIX));
      StoreFile.Writer writer = store.compactor.lcIdxCompact(lccCR, maxId, destPath, true);
      if (printForDebug && writer != null) {
        // NOTE(review): null-guard added -- it is unclear whether lcIdxCompact can
        // return null; without the guard the debug print would NPE. Confirm upstream.
        System.out.println("winter minor compact flush to: " + writer.getPath());
      }
    } finally {
      // Fix: close the readers even when the stat merge or compaction throws,
      // otherwise every opened StoreFile reader leaks on the error path.
      for (StoreFile sf : lccSFList) {
        sf.closeReader(true);
      }
    }
  }
}
项目:LCIndex-HBase-0.94.16    文件:Store.java   
/**
 * Default ratio-based minor compaction selection. Walks the candidates oldest first
 * and keeps the contiguous run [start, end) of files that are either small
 * (<= minCompactSize) or within the compaction ratio of the sum of the newer files.
 * @param compactSelection candidate files for this compaction, ordered oldest first
 * @return the trimmed selection; emptied (via {@code emptyFileList()}) when fewer
 *         than {@code minFilesToCompact} files qualify
 */
private CompactSelection defaultCompactionSelection(CompactSelection compactSelection) {
  // we're doing a minor compaction, let's see what files are applicable
  int start = 0;

  double r = compactSelection.getCompactSelectionRatio();

  // get store file sizes for incremental compacting selection.
  int countOfFiles = compactSelection.getFilesToCompact().size();
  // Fix: an empty candidate list would reach fileSizes[start] below with a
  // zero-length array and throw ArrayIndexOutOfBoundsException; nothing to
  // select, so return the (already empty) selection immediately.
  if (countOfFiles == 0) {
    return compactSelection;
  }
  long[] fileSizes = new long[countOfFiles];
  long[] sumSize = new long[countOfFiles];
  for (int i = countOfFiles - 1; i >= 0; --i) {
    StoreFile file = compactSelection.getFilesToCompact().get(i);
    fileSizes[i] = file.getReader().length();
    // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo
    int tooFar = i + this.maxFilesToCompact - 1;
    sumSize[i] =
        fileSizes[i] + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0)
            - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0);
  }

  /*
   * Start at the oldest file and stop when you find the first file that meets compaction
   * criteria: (1) a recently-flushed, small file (i.e. <= minCompactSize) OR (2) within the
   * compactRatio of sum(newer_files) Given normal skew, any newer files will also meet this
   * criteria Additional Note: If fileSizes.size() >> maxFilesToCompact, we will recurse on
   * compact(). Consider the oldest files first to avoid a situation where we always compact
   * [end-threshold,end). Then, the last file becomes an aggregate of the previous compactions.
   */
  while (countOfFiles - start >= this.minFilesToCompact
      && fileSizes[start] > Math.max(minCompactSize, (long) (sumSize[start + 1] * r))) {
    // NOTE(review): sumSize[start + 1] stays in bounds only while
    // minFilesToCompact >= 2 -- confirm the configuration enforces that floor.
    ++start;
  }
  int end = Math.min(countOfFiles, start + this.maxFilesToCompact);
  long totalSize = fileSizes[start] + ((start + 1 < countOfFiles) ? sumSize[start + 1] : 0);
  compactSelection = compactSelection.getSubList(start, end);

  // if we don't have enough files to compact, just wait
  if (compactSelection.getFilesToCompact().size() < this.minFilesToCompact) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipped compaction of " + this + ".  Only " + (end - start)
          + " file(s) of size " + StringUtils.humanReadableInt(totalSize)
          + " have met compaction criteria.");
    }
    compactSelection.emptyFileList();
    return compactSelection;
  }
  return compactSelection;
}
项目:IRIndex    文件:Store.java   
/**
 * Default ratio-based minor compaction selection. Walks the candidates oldest first
 * and keeps the contiguous run [start, end) of files that are either small
 * (<= minCompactSize) or within the compaction ratio of the sum of the newer files.
 * @param compactSelection candidate files for this compaction, ordered oldest first
 * @return the trimmed selection; emptied (via {@code emptyFileList()}) when fewer
 *         than {@code minFilesToCompact} files qualify
 */
private CompactSelection defaultCompactionSelection(CompactSelection compactSelection) {
  // we're doing a minor compaction, let's see what files are applicable
  int start = 0;

  double r = compactSelection.getCompactSelectionRatio();

  // get store file sizes for incremental compacting selection.
  int countOfFiles = compactSelection.getFilesToCompact().size();
  // Fix: an empty candidate list would reach fileSizes[start] below with a
  // zero-length array and throw ArrayIndexOutOfBoundsException; nothing to
  // select, so return the (already empty) selection immediately.
  if (countOfFiles == 0) {
    return compactSelection;
  }
  long [] fileSizes = new long[countOfFiles];
  long [] sumSize = new long[countOfFiles];
  for (int i = countOfFiles-1; i >= 0; --i) {
    StoreFile file = compactSelection.getFilesToCompact().get(i);
    fileSizes[i] = file.getReader().length();
    // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo
    int tooFar = i + this.maxFilesToCompact - 1;
    sumSize[i] = fileSizes[i]
        + ((i+1    < countOfFiles) ? sumSize[i+1]      : 0)
        - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0);
  }

    /* Start at the oldest file and stop when you find the first file that
     * meets compaction criteria:
     *   (1) a recently-flushed, small file (i.e. <= minCompactSize)
     *      OR
     *   (2) within the compactRatio of sum(newer_files)
     * Given normal skew, any newer files will also meet this criteria
     *
     * Additional Note:
     * If fileSizes.size() >> maxFilesToCompact, we will recurse on
     * compact().  Consider the oldest files first to avoid a
     * situation where we always compact [end-threshold,end).  Then, the
     * last file becomes an aggregate of the previous compactions.
     */
  while(countOfFiles - start >= this.minFilesToCompact &&
      fileSizes[start] >
          Math.max(minCompactSize, (long)(sumSize[start+1] * r))) {
    // NOTE(review): sumSize[start+1] stays in bounds only while
    // minFilesToCompact >= 2 -- confirm the configuration enforces that floor.
    ++start;
  }
  int end = Math.min(countOfFiles, start + this.maxFilesToCompact);
  long totalSize = fileSizes[start]
      + ((start+1 < countOfFiles) ? sumSize[start+1] : 0);
  compactSelection = compactSelection.getSubList(start, end);

  // if we don't have enough files to compact, just wait
  if (compactSelection.getFilesToCompact().size() < this.minFilesToCompact) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipped compaction of " + this
          + ".  Only " + (end - start) + " file(s) of size "
          + StringUtils.humanReadableInt(totalSize)
          + " have met compaction criteria.");
    }
    compactSelection.emptyFileList();
    return compactSelection;
  }
  return compactSelection;
}
项目:HBase-Research    文件:Store.java   
/**
 * Default ratio-based minor compaction selection. Walks the candidates oldest first
 * and keeps the contiguous run [start, end) of files that are either small
 * (<= minCompactSize) or within the compaction ratio of the sum of the newer files.
 * @param compactSelection candidate files for this compaction, ordered oldest first
 * @return the trimmed selection; emptied (via {@code emptyFileList()}) when fewer
 *         than {@code minFilesToCompact} files qualify
 */
private CompactSelection defaultCompactionSelection(CompactSelection compactSelection) {
  // we're doing a minor compaction, let's see what files are applicable
  int start = 0;

  double r = compactSelection.getCompactSelectionRatio();

  // get store file sizes for incremental compacting selection.
  int countOfFiles = compactSelection.getFilesToCompact().size();
  // Fix: an empty candidate list would reach fileSizes[start] below with a
  // zero-length array and throw ArrayIndexOutOfBoundsException; nothing to
  // select, so return the (already empty) selection immediately.
  if (countOfFiles == 0) {
    return compactSelection;
  }
  long [] fileSizes = new long[countOfFiles];
  long [] sumSize = new long[countOfFiles];
  for (int i = countOfFiles-1; i >= 0; --i) {
    StoreFile file = compactSelection.getFilesToCompact().get(i);
    fileSizes[i] = file.getReader().length();
    // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo
    int tooFar = i + this.maxFilesToCompact - 1;
    sumSize[i] = fileSizes[i]
        + ((i+1    < countOfFiles) ? sumSize[i+1]      : 0)
        - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0);
  }

    /* Start at the oldest file and stop when you find the first file that
     * meets compaction criteria:
     *   (1) a recently-flushed, small file (i.e. <= minCompactSize)
     *      OR
     *   (2) within the compactRatio of sum(newer_files)
     * Given normal skew, any newer files will also meet this criteria
     *
     * Additional Note:
     * If fileSizes.size() >> maxFilesToCompact, we will recurse on
     * compact().  Consider the oldest files first to avoid a
     * situation where we always compact [end-threshold,end).  Then, the
     * last file becomes an aggregate of the previous compactions.
     */
  while(countOfFiles - start >= this.minFilesToCompact &&
      fileSizes[start] >
          Math.max(minCompactSize, (long)(sumSize[start+1] * r))) {
    // NOTE(review): sumSize[start+1] stays in bounds only while
    // minFilesToCompact >= 2 -- confirm the configuration enforces that floor.
    ++start;
  }
  int end = Math.min(countOfFiles, start + this.maxFilesToCompact);
  long totalSize = fileSizes[start]
      + ((start+1 < countOfFiles) ? sumSize[start+1] : 0);
  compactSelection = compactSelection.getSubList(start, end);

  // if we don't have enough files to compact, just wait
  if (compactSelection.getFilesToCompact().size() < this.minFilesToCompact) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipped compaction of " + this
          + ".  Only " + (end - start) + " file(s) of size "
          + StringUtils.humanReadableInt(totalSize)
          + " have met compaction criteria.");
    }
    compactSelection.emptyFileList();
    return compactSelection;
  }
  return compactSelection;
}
项目:LCIndex-HBase-0.94.16    文件:Store.java   
/**
 * Algorithm to choose which files to compact, see {@link #compactSelection(java.util.List, int)}.
 * Convenience overload that delegates with {@code NO_PRIORITY}.
 * @param candidates candidate store files, passed through to the prioritized overload
 * @return the resulting {@link CompactSelection} from the prioritized overload
 * @throws IOException propagated from the prioritized overload
 */
CompactSelection compactSelection(List<StoreFile> candidates) throws IOException {
  return compactSelection(candidates, NO_PRIORITY);
}
项目:IRIndex    文件:Store.java   
/**
 * Algorithm to choose which files to compact, see {@link #compactSelection(java.util.List, int)}.
 * Convenience overload that delegates with {@code NO_PRIORITY}.
 * @param candidates candidate store files, passed through to the prioritized overload
 * @return the resulting {@link CompactSelection} from the prioritized overload
 * @throws IOException propagated from the prioritized overload
 */
CompactSelection compactSelection(List<StoreFile> candidates) throws IOException {
  return compactSelection(candidates,NO_PRIORITY);
}
项目:HBase-Research    文件:Store.java   
/**
 * Algorithm to choose which files to compact, see {@link #compactSelection(java.util.List, int)}.
 * Convenience overload that delegates with {@code NO_PRIORITY}.
 * @param candidates candidate store files, passed through to the prioritized overload
 * @return the resulting {@link CompactSelection} from the prioritized overload
 * @throws IOException propagated from the prioritized overload
 */
CompactSelection compactSelection(List<StoreFile> candidates) throws IOException {
  return compactSelection(candidates,NO_PRIORITY);
}
项目:hbase-0.94.8-qod    文件:Store.java   
/**
 * Algorithm to choose which files to compact, see {@link #compactSelection(java.util.List, int)}.
 * Convenience overload that delegates with {@code NO_PRIORITY}.
 * @param candidates candidate store files, passed through to the prioritized overload
 * @return the resulting {@link CompactSelection} from the prioritized overload
 * @throws IOException propagated from the prioritized overload
 */
CompactSelection compactSelection(List<StoreFile> candidates) throws IOException {
  return compactSelection(candidates,NO_PRIORITY);
}
项目:hbase-0.94.8-qod    文件:Store.java   
/**
 * Algorithm to choose which files to compact, see {@link #compactSelection(java.util.List, int)}.
 * Convenience overload that delegates with {@code NO_PRIORITY}.
 * @param candidates candidate store files, passed through to the prioritized overload
 * @return the resulting {@link CompactSelection} from the prioritized overload
 * @throws IOException propagated from the prioritized overload
 */
CompactSelection compactSelection(List<StoreFile> candidates) throws IOException {
  return compactSelection(candidates,NO_PRIORITY);
}
项目:hindex    文件:Store.java   
/**
 * Algorithm to choose which files to compact, see {@link #compactSelection(java.util.List, int)}.
 * Convenience overload that delegates with {@code NO_PRIORITY}.
 * @param candidates candidate store files, passed through to the prioritized overload
 * @return the resulting {@link CompactSelection} from the prioritized overload
 * @throws IOException propagated from the prioritized overload
 */
CompactSelection compactSelection(List<StoreFile> candidates) throws IOException {
  return compactSelection(candidates,NO_PRIORITY);
}