Java Class org.apache.hadoop.hbase.regionserver.StoreUtils Example Source Code

Project: ditb    File: FIFOCompactionPolicy.java
@Override
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    List<StoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {

  if (forceMajor) {
    LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag.");
  }
  boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles);
  if (isAfterSplit) {
    LOG.info("Split detected, delegate selection to the parent policy.");
    return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction,
      mayUseOffPeak, forceMajor);
  }

  // Select only expired store files; if none have expired, the request is
  // empty and nothing is compacted.
  Collection<StoreFile> toCompact = getExpiredStores(candidateFiles, filesCompacting);
  CompactionRequest result = new CompactionRequest(toCompact);
  return result;
}
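Every policy in this listing starts with the same StoreUtils.hasReferences guard: reference files are stubs left behind by a region split that point into a parent region's store files, and they must be compacted away before any other selection logic applies. A minimal sketch of the idea, with our own names standing in for HBase's internals:

import java.util.Collection;
import java.util.function.Predicate;

// Illustrative sketch only, not HBase source: a "reference" store file is a
// stub created by a region split that points into a parent region's file.
// The real check inspects StoreFile metadata; here a predicate stands in.
final class ReferenceCheckSketch {
  static <F> boolean hasReferences(Collection<F> files, Predicate<F> isReference) {
    return files != null && files.stream().anyMatch(isReference);
  }
}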
Project: ditb    File: RatioBasedCompactionPolicy.java
/**
 * @param filesToCompact the store files being considered for compaction
 * @return the jittered period until the next major compaction, in milliseconds
 *   (0 means no major compaction)
 */
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        // Synchronized to ensure one user of random instance at a time.
        double rnd = -1;
        synchronized (this) {
          this.random.setSeed(seed);
          rnd = this.random.nextDouble();
        }
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // If seed is null, then no storefiles == no major compaction
      }
    }
  }
  return ret;
}
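The jitter arithmetic is easier to verify in isolation. Below is a standalone sketch (names and structure are ours, not HBase's): with period = 24 h and jitterPct = 0.2, the result always lands within +/- 4.8 h of the period, and a fixed seed always produces the same offset, which is exactly why a deterministic seed derived from the store files avoids a major compaction storm when every store recomputes its schedule on restart.

import java.util.Random;

// Standalone sketch of the jitter computation in getNextMajorCompactTime.
public class JitterSketch {
  static long nextMajorCompactTime(long periodMs, double jitterPct, Integer seed) {
    if (periodMs <= 0 || jitterPct <= 0) {
      return periodMs;                                   // jitter disabled
    }
    if (seed == null) {
      return 0;                                          // no store files == no major compaction
    }
    long jitter = Math.round(periodMs * jitterPct);
    double rnd = new Random(seed).nextDouble();          // deterministic for a fixed seed
    return periodMs + jitter - Math.round(2L * jitter * rnd); // periodMs +/- jitter
  }

  public static void main(String[] args) {
    long day = 24L * 60 * 60 * 1000;
    // Prints the same value on every run for the same seed.
    System.out.println(nextMajorCompactTime(day, 0.2, 42));
  }
}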
Project: pbase    File: RatioBasedCompactionPolicy.java
/**
 * @param filesToCompact the store files being considered for compaction
 * @return the jittered period until the next major compaction, in milliseconds
 *   (0 means no major compaction)
 */
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        // Synchronized to ensure one user of random instance at a time.
        double rnd = -1;
        synchronized (this) {
          this.random.setSeed(seed);
          rnd = this.random.nextDouble();
        }
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // If seed is null, then no storefiles == no major compaction
      }
    }
  }
  return ret;
}
Project: HIndex    File: RatioBasedCompactionPolicy.java
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        double rnd = (new Random(seed)).nextDouble();
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // no storefiles == no major compaction
      }
    }
  }
  return ret;
}
Project: hbase    File: FIFOCompactionPolicy.java
@Override
public CompactionRequestImpl selectCompaction(Collection<HStoreFile> candidateFiles,
    List<HStoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {
  if (forceMajor) {
    LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag.");
  }
  boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles);
  if (isAfterSplit) {
    LOG.info("Split detected, delegate selection to the parent policy.");
    return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction,
      mayUseOffPeak, forceMajor);
  }

  // Select only expired store files; if none have expired, the request is
  // empty and nothing is compacted.
  Collection<HStoreFile> toCompact = getExpiredStores(candidateFiles, filesCompacting);
  CompactionRequestImpl result = new CompactionRequestImpl(toCompact);
  return result;
}
Project: PyroDB    File: RatioBasedCompactionPolicy.java
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        double rnd = (new Random(seed)).nextDouble();
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // no storefiles == no major compaction
      }
    }
  }
  return ret;
}
Project: c5    File: RatioBasedCompactionPolicy.java
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        double rnd = (new Random(seed)).nextDouble();
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // no storefiles == no major compaction
      }
    }
  }
  return ret;
}
Project: DominoHBase    File: CompactionPolicy.java
public long getNextMajorCompactTime(final List<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        double rnd = (new Random(seed)).nextDouble();
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // no storefiles == no major compaction
      }
    }
  }
  return ret;
}
Project: ditb    File: FIFOCompactionPolicy.java
@Override
public boolean isMajorCompaction(Collection<StoreFile> filesToCompact) throws IOException {
  boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact);
  if (isAfterSplit) {
    LOG.info("Split detected, delegate to the parent policy.");
    return super.isMajorCompaction(filesToCompact);
  }
  return false;
}
Project: ditb    File: FIFOCompactionPolicy.java
@Override
public boolean needsCompaction(Collection<StoreFile> storeFiles,
    List<StoreFile> filesCompacting) {
  boolean isAfterSplit = StoreUtils.hasReferences(storeFiles);
  if (isAfterSplit) {
    LOG.info("Split detected, delegate to the parent policy.");
    return super.needsCompaction(storeFiles, filesCompacting);
  }
  return hasExpiredStores(storeFiles);
}
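hasExpiredStores, which this method falls through to, reduces under FIFO to a TTL check against each file's newest cell. A hedged sketch of that predicate (the signature is ours; the real code reads the file's metadata):

final class FifoExpirySketch {
  // A store file is "expired" under FIFO when even its newest cell is older
  // than the column family's TTL; the whole file can then be dropped by
  // compaction without rewriting any data.
  static boolean isExpired(long maxTimestampMs, long ttlMs, long nowMs) {
    return maxTimestampMs < nowMs - ttlMs;
  }
}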
Project: ditb    File: StripeCompactionPolicy.java
public boolean needsCompactions(StripeInformationProvider si, List<StoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
Project: pbase    File: StripeCompactionPolicy.java
public boolean needsCompactions(StripeInformationProvider si, List<StoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
Project: HIndex    File: StripeCompactionPolicy.java
public boolean needsCompactions(StripeInformationProvider si, List<StoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
Project: hbase    File: DateTieredCompactor.java
private boolean needEmptyFile(CompactionRequestImpl request) {
  // if we are going to compact the last N files, then we need to emit an empty file to retain the
  // maxSeqId if we haven't written out anything.
  OptionalLong maxSeqId = StoreUtils.getMaxSequenceIdInList(request.getFiles());
  OptionalLong storeMaxSeqId = store.getMaxSequenceId();
  return maxSeqId.isPresent() && storeMaxSeqId.isPresent() &&
      maxSeqId.getAsLong() == storeMaxSeqId.getAsLong();
}
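StoreUtils.getMaxSequenceIdInList folds the files' sequence ids into an OptionalLong; needEmptyFile is true exactly when the selection covers the file carrying the store's maximum seqId, since compacting it away without writing output would lose that watermark. A simplified model with plain longs standing in for HStoreFile sequence ids:

import java.util.Collection;
import java.util.OptionalLong;

// Simplified model, not HBase source: seqIds stand in for HStoreFile
// sequence ids. An empty marker file is needed when the compaction output
// might otherwise drop the store's max seqId.
final class SeqIdSketch {
  static OptionalLong maxSeqId(Collection<Long> seqIds) {
    return seqIds.stream().mapToLong(Long::longValue).max();
  }

  static boolean needEmptyFile(Collection<Long> selectedSeqIds, OptionalLong storeMaxSeqId) {
    OptionalLong selectedMax = maxSeqId(selectedSeqIds);
    return selectedMax.isPresent() && storeMaxSeqId.isPresent()
        && selectedMax.getAsLong() == storeMaxSeqId.getAsLong();
  }
}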
Project: hbase    File: SortedCompactionPolicy.java
/**
 * @param filesToCompact the store files being considered for compaction
 * @return the jittered period until the next major compaction, in milliseconds
 *   (0 means no major compaction)
 */
public long getNextMajorCompactTime(Collection<HStoreFile> filesToCompact) {
  /** Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_PERIOD}. */
  long period = comConf.getMajorCompactionPeriod();
  if (period <= 0) {
    return period;
  }

  /**
   * Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER},
   * that is, +/- 3.5 days (7 days * 0.5).
   */
  double jitterPct = comConf.getMajorCompactionJitter();
  if (jitterPct <= 0) {
    return period;
  }

  // deterministic jitter avoids a major compaction storm on restart
  OptionalInt seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
  if (seed.isPresent()) {
    // Synchronized to ensure one user of random instance at a time.
    double rnd;
    synchronized (this) {
      this.random.setSeed(seed.getAsInt());
      rnd = this.random.nextDouble();
    }
    long jitter = Math.round(period * jitterPct);
    return period + jitter - Math.round(2L * jitter * rnd);
  } else {
    return 0L;
  }
}
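Note the API shift from the older copies above: here StoreUtils.getDeterministicRandomSeed returns an OptionalInt rather than a nullable Integer, so the no-store-files case is explicit in the type. A sketch of the same call pattern under that assumption (names ours):

import java.util.OptionalInt;
import java.util.Random;

// Sketch of the OptionalInt variant: absence of a seed, i.e. no store
// files, short-circuits to 0 instead of relying on a null Integer as the
// older copies do.
final class OptionalSeedSketch {
  private final Random random = new Random();

  synchronized long jitteredPeriod(long period, double jitterPct, OptionalInt seed) {
    if (!seed.isPresent()) {
      return 0L;
    }
    random.setSeed(seed.getAsInt());                     // deterministic per store
    long jitter = Math.round(period * jitterPct);
    return period + jitter - Math.round(2L * jitter * random.nextDouble());
  }
}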
Project: hbase    File: FIFOCompactionPolicy.java
@Override
public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
  throws IOException {
  boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact);
  if (isAfterSplit) {
    LOG.info("Split detected, delegate to the parent policy.");
    return super.shouldPerformMajorCompaction(filesToCompact);
  }
  return false;
}
Project: hbase    File: FIFOCompactionPolicy.java
@Override
public boolean needsCompaction(Collection<HStoreFile> storeFiles,
    List<HStoreFile> filesCompacting) {
  boolean isAfterSplit = StoreUtils.hasReferences(storeFiles);
  if (isAfterSplit) {
    LOG.info("Split detected, delegate to the parent policy.");
    return super.needsCompaction(storeFiles, filesCompacting);
  }
  return hasExpiredStores(storeFiles);
}
Project: hbase    File: StripeCompactionPolicy.java
public boolean needsCompactions(StripeInformationProvider si, List<HStoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
Project: PyroDB    File: StripeCompactionPolicy.java
public boolean needsCompactions(StripeInformationProvider si, List<StoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
Project: DominoHBase    File: CompactionPolicy.java
/**
 * @param candidateFiles candidate files, ordered from oldest to newest
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactSelection selectCompaction(List<StoreFile> candidateFiles,
    boolean isUserCompaction, boolean forceMajor)
  throws IOException {
  // Preliminary compaction subject to filters
  CompactSelection candidateSelection = new CompactSelection(candidateFiles);
  long cfTtl = this.storeConfig.getStoreFileTtl();
  if (!forceMajor) {
    // If there are expired files, only select them so that compaction deletes them
    if (comConf.shouldDeleteExpired() && (cfTtl != Long.MAX_VALUE)) {
      CompactSelection expiredSelection = selectExpiredStoreFiles(
        candidateSelection, EnvironmentEdgeManager.currentTimeMillis() - cfTtl);
      if (expiredSelection != null) {
        return expiredSelection;
      }
    }
    candidateSelection = skipLargeFiles(candidateSelection);
  }

  // Force a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested
  // as a major compaction.
  // Or, if there are any references among the candidates.
  boolean majorCompaction = (
    (forceMajor && isUserCompaction)
    || ((forceMajor || isMajorCompaction(candidateSelection.getFilesToCompact()))
        && (candidateSelection.getFilesToCompact().size() < comConf.getMaxFilesToCompact()))
    || StoreUtils.hasReferences(candidateSelection.getFilesToCompact())
    );

  if (!majorCompaction) {
    // we're doing a minor compaction, let's see what files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection =
      removeExcessFiles(candidateSelection, isUserCompaction, majorCompaction);
  return candidateSelection;
}
Project: ditb    File: RatioBasedCompactionPolicy.java
/**
 * @param candidateFiles candidate files, ordered from oldest to newest. All files in store.
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  // If we can't have all files, we cannot do major anyway
  boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
  if (!(forceMajor && isAllFiles)) {
    candidateSelection = skipLargeFiles(candidateSelection, mayUseOffPeak);
    isAllFiles = candidateFiles.size() == candidateSelection.size();
  }

  // Try a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested as a major compaction
  boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction)
      || (((forceMajor && isAllFiles) || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
  // Or, if there are any references among the candidates.
  boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);
  if (!isTryingMajor && !isAfterSplit) {
    // We're not compacting all files; let's see which files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, isTryingMajor);
  // Now we have the final file list, so we can determine if we can do major/all files.
  isAllFiles = (candidateFiles.size() == candidateSelection.size());
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !isAllFiles && mayUseOffPeak);
  result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);
  return result;
}
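The mayBeStuck estimate near the top is worth isolating: it asks whether the store would still be at or beyond its blocking file count even after the in-flight compaction finishes and emits its single output file. As a standalone sketch (names ours):

// Sketch of the "stuck" estimate: if candidate files minus in-flight files
// (plus the one output file they will produce) still reach the blocking
// threshold, flushes are about to stall and the ratio check is relaxed.
final class StuckEstimateSketch {
  static boolean mayBeStuck(int candidateFiles, int filesCompacting, int blockingFileCount) {
    int futureFiles = filesCompacting == 0 ? 0 : 1;
    return candidateFiles - filesCompacting + futureFiles >= blockingFileCount;
  }
}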
Project: ditb    File: RatioBasedCompactionPolicy.java
@Override
public boolean isMajorCompaction(final Collection<StoreFile> filesToCompact)
    throws IOException {
  boolean result = false;
  long mcTime = getNextMajorCompactTime(filesToCompact);
  if (filesToCompact == null || filesToCompact.isEmpty() || mcTime == 0) {
    return result;
  }
  // TODO: Use better method for determining stamp of last major (HBASE-2990)
  long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
  long now = System.currentTimeMillis();
  if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
    // Major compaction time has elapsed.
    long cfTtl = this.storeConfigInfo.getStoreFileTtl();
    if (filesToCompact.size() == 1) {
      // Single file
      StoreFile sf = filesToCompact.iterator().next();
      Long minTimestamp = sf.getMinimumTimestamp();
      long oldest = (minTimestamp == null)
          ? Long.MIN_VALUE
          : now - minTimestamp.longValue();
      if (sf.isMajorCompaction() &&
          (cfTtl == HConstants.FOREVER || oldest < cfTtl)) {
        float blockLocalityIndex = sf.getHDFSBlockDistribution().getBlockLocalityIndex(
            RSRpcServices.getHostname(comConf.conf, false)
        );
        if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Major compaction triggered on only store " + this +
                "; to make hdfs blocks local, current blockLocalityIndex is " +
                blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() +
                ")");
          }
          result = true;
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Skipping major compaction of " + this +
                " because one (major) compacted file only, oldestTime " +
                oldest + "ms is < ttl=" + cfTtl + " and blockLocalityIndex is " +
                blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() +
                ")");
          }
        }
      } else if (cfTtl != HConstants.FOREVER && oldest > cfTtl) {
        LOG.debug("Major compaction triggered on store " + this +
          ", because keyvalues outdated; time since last major compaction " +
          (now - lowTimestamp) + "ms");
        result = true;
      }
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Major compaction triggered on store " + this +
            "; time since last major compaction " + (now - lowTimestamp) + "ms");
      }
      result = true;
    }
  }
  return result;
}
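StoreUtils.getLowestTimestamp supplies the oldest file-creation time among the candidates, which this method treats as a stand-in for when the last major compaction ran (see the HBASE-2990 TODO). The gating condition, reduced to a sketch:

// Sketch: a major compaction is due once the oldest candidate file predates
// now - mcTime. lowTimestamp <= 0 means no usable timestamp was found.
final class MajorDueSketch {
  static boolean majorCompactionDue(long lowTimestamp, long mcTime, long now) {
    return lowTimestamp > 0L && lowTimestamp < now - mcTime;
  }
}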
Project: pbase    File: RatioBasedCompactionPolicy.java
/**
 * @param candidateFiles candidate files, ordered from oldest to newest. All files in store.
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  // If we can't have all files, we cannot do major anyway
  boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
  if (!(forceMajor && isAllFiles)) {
    candidateSelection = skipLargeFiles(candidateSelection);
    isAllFiles = candidateFiles.size() == candidateSelection.size();
  }

  // Try a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested as a major compaction
  boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction)
      || (((forceMajor && isAllFiles) || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
  // Or, if there are any references among the candidates.
  boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);
  if (!isTryingMajor && !isAfterSplit) {
    // We're not compacting all files; let's see which files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, isTryingMajor);
  // Now we have the final file list, so we can determine if we can do major/all files.
  isAllFiles = (candidateFiles.size() == candidateSelection.size());
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !isAllFiles && mayUseOffPeak);
  result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);
  return result;
}
Project: pbase    File: RatioBasedCompactionPolicy.java
public boolean isMajorCompaction(final Collection<StoreFile> filesToCompact)
    throws IOException {
  boolean result = false;
  long mcTime = getNextMajorCompactTime(filesToCompact);
  if (filesToCompact == null || filesToCompact.isEmpty() || mcTime == 0) {
    return result;
  }
  // TODO: Use better method for determining stamp of last major (HBASE-2990)
  long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
  long now = System.currentTimeMillis();
  if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
    // Major compaction time has elapsed.
    long cfTtl = this.storeConfigInfo.getStoreFileTtl();
    if (filesToCompact.size() == 1) {
      // Single file
      StoreFile sf = filesToCompact.iterator().next();
      Long minTimestamp = sf.getMinimumTimestamp();
      long oldest = (minTimestamp == null)
          ? Long.MIN_VALUE
          : now - minTimestamp.longValue();
      if (sf.isMajorCompaction() &&
          (cfTtl == HConstants.FOREVER || oldest < cfTtl)) {
        float blockLocalityIndex = sf.getHDFSBlockDistribution().getBlockLocalityIndex(
            RSRpcServices.getHostname(comConf.conf)
        );
        if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Major compaction triggered on only store " + this +
                "; to make hdfs blocks local, current blockLocalityIndex is " +
                blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() +
                ")");
          }
          result = true;
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Skipping major compaction of " + this +
                " because one (major) compacted file only, oldestTime " +
                oldest + "ms is < ttl=" + cfTtl + " and blockLocalityIndex is " +
                blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() +
                ")");
          }
        }
      } else if (cfTtl != HConstants.FOREVER && oldest > cfTtl) {
        LOG.debug("Major compaction triggered on store " + this +
          ", because keyvalues outdated; time since last major compaction " +
          (now - lowTimestamp) + "ms");
        result = true;
      }
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Major compaction triggered on store " + this +
            "; time since last major compaction " + (now - lowTimestamp) + "ms");
      }
      result = true;
    }
  }
  return result;
}
Project: HIndex    File: RatioBasedCompactionPolicy.java
/**
 * @param candidateFiles candidate files, ordered from oldest to newest
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  long cfTtl = this.storeConfigInfo.getStoreFileTtl();
  if (!forceMajor) {
    // If there are expired files, only select them so that compaction deletes them
    if (comConf.shouldDeleteExpired() && (cfTtl != Long.MAX_VALUE)) {
      ArrayList<StoreFile> expiredSelection = selectExpiredStoreFiles(
          candidateSelection, EnvironmentEdgeManager.currentTimeMillis() - cfTtl);
      if (expiredSelection != null) {
        return new CompactionRequest(expiredSelection);
      }
    }
    candidateSelection = skipLargeFiles(candidateSelection);
  }

  // Force a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested
  // as a major compaction.
  // Or, if there are any references among the candidates.
  boolean majorCompaction = (
    (forceMajor && isUserCompaction)
    || ((forceMajor || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()))
    || StoreUtils.hasReferences(candidateSelection)
    );

  if (!majorCompaction) {
    // we're doing a minor compaction, let's see what files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, majorCompaction);
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !majorCompaction && mayUseOffPeak);
  return result;
}
Project: HIndex    File: RatioBasedCompactionPolicy.java
public boolean isMajorCompaction(final Collection<StoreFile> filesToCompact)
    throws IOException {
  boolean result = false;
  long mcTime = getNextMajorCompactTime(filesToCompact);
  if (filesToCompact == null || filesToCompact.isEmpty() || mcTime == 0) {
    return result;
  }
  // TODO: Use better method for determining stamp of last major (HBASE-2990)
  long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
  long now = System.currentTimeMillis();
  if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
    // Major compaction time has elapsed.
    long cfTtl = this.storeConfigInfo.getStoreFileTtl();
    if (filesToCompact.size() == 1) {
      // Single file
      StoreFile sf = filesToCompact.iterator().next();
      Long minTimestamp = sf.getMinimumTimestamp();
      long oldest = (minTimestamp == null)
          ? Long.MIN_VALUE
          : now - minTimestamp.longValue();
      if (sf.isMajorCompaction() &&
          (cfTtl == HConstants.FOREVER || oldest < cfTtl)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Skipping major compaction of " + this +
              " because one (major) compacted file only and oldestTime " +
              oldest + "ms is < ttl=" + cfTtl);
        }
      } else if (cfTtl != HConstants.FOREVER && oldest > cfTtl) {
        LOG.debug("Major compaction triggered on store " + this +
          ", because keyvalues outdated; time since last major compaction " +
          (now - lowTimestamp) + "ms");
        result = true;
      }
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Major compaction triggered on store " + this +
            "; time since last major compaction " + (now - lowTimestamp) + "ms");
      }
      result = true;
    }
  }
  return result;
}
Project: hbase    File: DateTieredCompactionPolicy.java
@Override
public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
    throws IOException {
  long mcTime = getNextMajorCompactTime(filesToCompact);
  if (filesToCompact == null || mcTime == 0) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("filesToCompact: " + filesToCompact + " mcTime: " + mcTime);
    }
    return false;
  }

  // TODO: Use better method for determining stamp of last major (HBASE-2990)
  long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
  long now = EnvironmentEdgeManager.currentTime();
  if (lowTimestamp <= 0L || lowTimestamp >= (now - mcTime)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + lowTimestamp + " now: " +
          now + " mcTime: " + mcTime); 
    }
    return false;
  }

  long cfTTL = this.storeConfigInfo.getStoreFileTtl();
  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  List<Long> boundaries = getCompactBoundariesForMajor(filesToCompact, now);
  boolean[] filesInWindow = new boolean[boundaries.size()];

  for (HStoreFile file: filesToCompact) {
    OptionalLong minTimestamp = file.getMinimumTimestamp();
    long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE;
    if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) {
      LOG.debug("Major compaction triggered on store " + this
        + "; for TTL maintenance");
      return true;
    }
    if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) {
      LOG.debug("Major compaction triggered on store " + this
        + ", because there are new files and time since last major compaction "
        + (now - lowTimestamp) + "ms");
      return true;
    }

    int lowerWindowIndex =
        Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE));
    int upperWindowIndex =
        Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE));
    // Handle boundary conditions and negative values of binarySearch
    lowerWindowIndex = (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex;
    upperWindowIndex = (upperWindowIndex < 0) ? Math.abs(upperWindowIndex + 2) : upperWindowIndex;
    if (lowerWindowIndex != upperWindowIndex) {
      LOG.debug("Major compaction triggered on store " + this + "; because file "
        + file.getPath() + " has data with timestamps cross window boundaries");
      return true;
    } else if (filesInWindow[upperWindowIndex]) {
      LOG.debug("Major compaction triggered on store " + this +
        "; because there are more than one file in some windows");
      return true;
    } else {
      filesInWindow[upperWindowIndex] = true;
    }
    hdfsBlocksDistribution.add(file.getHDFSBlockDistribution());
  }

  float blockLocalityIndex = hdfsBlocksDistribution
      .getBlockLocalityIndex(RSRpcServices.getHostname(comConf.conf, false));
  if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
    LOG.debug("Major compaction triggered on store " + this
      + "; to make hdfs blocks local, current blockLocalityIndex is "
      + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")");
    return true;
  }

  LOG.debug("Skipping major compaction of " + this +
    ", because the files are already major compacted");
  return false;
}
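The subtle part of the window check above is the Collections.binarySearch bookkeeping: a miss returns -(insertionPoint) - 1, and Math.abs(idx + 2) recovers insertionPoint - 1, the window whose lower boundary sits at or below the timestamp. A self-contained sketch:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Sketch of the boundary lookup: for a miss, binarySearch returns
// -(insertionPoint) - 1, so Math.abs(idx + 2) == insertionPoint - 1 for any
// timestamp at or above the first boundary -- the index of the containing
// window.
final class WindowIndexSketch {
  static int windowIndex(List<Long> boundaries, long timestamp) {
    int idx = Collections.binarySearch(boundaries, timestamp);
    return idx < 0 ? Math.abs(idx + 2) : idx;
  }

  public static void main(String[] args) {
    List<Long> boundaries = Arrays.asList(0L, 100L, 200L);
    System.out.println(windowIndex(boundaries, 150L)); // 1: falls in [100, 200)
    System.out.println(windowIndex(boundaries, 100L)); // 1: exact boundary hit
  }
}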
Project: hbase    File: SortedCompactionPolicy.java
/**
 * @param candidateFiles candidate files, ordered from oldest to newest by seqId. We rely on
 *   DefaultStoreFileManager to sort the files by seqId to guarantee contiguous compaction based
 *   on seqId for data consistency.
 * @return subset copy of candidate list that meets compaction criteria
 */
public CompactionRequestImpl selectCompaction(Collection<HStoreFile> candidateFiles,
    List<HStoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<HStoreFile> candidateSelection = new ArrayList<>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();

  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  // If we can't have all files, we cannot do major anyway
  boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
  if (!(forceMajor && isAllFiles)) {
    candidateSelection = skipLargeFiles(candidateSelection, mayUseOffPeak);
    isAllFiles = candidateFiles.size() == candidateSelection.size();
  }

  // Try a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested as a major compaction
  boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction)
      || (((forceMajor && isAllFiles) || shouldPerformMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
  // Or, if there are any references among the candidates.
  boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);

  CompactionRequestImpl result = createCompactionRequest(candidateSelection,
    isTryingMajor || isAfterSplit, mayUseOffPeak, mayBeStuck);

  ArrayList<HStoreFile> filesToCompact = Lists.newArrayList(result.getFiles());
  removeExcessFiles(filesToCompact, isUserCompaction, isTryingMajor);
  result.updateFiles(filesToCompact);

  isAllFiles = (candidateFiles.size() == filesToCompact.size());
  result.setOffPeak(!filesToCompact.isEmpty() && !isAllFiles && mayUseOffPeak);
  result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);

  return result;
}
Project: hbase    File: RatioBasedCompactionPolicy.java
@Override
public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
  throws IOException {
  boolean result = false;
  long mcTime = getNextMajorCompactTime(filesToCompact);
  if (filesToCompact == null || filesToCompact.isEmpty() || mcTime == 0) {
    return result;
  }
  // TODO: Use better method for determining stamp of last major (HBASE-2990)
  long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
  long now = EnvironmentEdgeManager.currentTime();
  if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
    String regionInfo;
    if (this.storeConfigInfo != null && this.storeConfigInfo instanceof HStore) {
      regionInfo = ((HStore)this.storeConfigInfo).getRegionInfo().getRegionNameAsString();
    } else {
      regionInfo = this.toString();
    }
    // Major compaction time has elapsed.
    long cfTTL = HConstants.FOREVER;
    if (this.storeConfigInfo != null) {
      cfTTL = this.storeConfigInfo.getStoreFileTtl();
    }
    if (filesToCompact.size() == 1) {
      // Single file
      HStoreFile sf = filesToCompact.iterator().next();
      OptionalLong minTimestamp = sf.getMinimumTimestamp();
      long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE;
      if (sf.isMajorCompactionResult() && (cfTTL == Long.MAX_VALUE || oldest < cfTTL)) {
        float blockLocalityIndex =
          sf.getHDFSBlockDistribution().getBlockLocalityIndex(
          RSRpcServices.getHostname(comConf.conf, false));
        if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
          LOG.debug("Major compaction triggered on only store " + regionInfo
            + "; to make hdfs blocks local, current blockLocalityIndex is "
            + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")");
          result = true;
        } else {
          LOG.debug("Skipping major compaction of " + regionInfo
            + " because one (major) compacted file only, oldestTime " + oldest
            + "ms is < TTL=" + cfTTL + " and blockLocalityIndex is " + blockLocalityIndex
            + " (min " + comConf.getMinLocalityToForceCompact() + ")");
        }
      } else if (cfTTL != HConstants.FOREVER && oldest > cfTTL) {
        LOG.debug("Major compaction triggered on store " + regionInfo
          + ", because keyvalues outdated; time since last major compaction "
          + (now - lowTimestamp) + "ms");
        result = true;
      }
    } else {
      LOG.debug("Major compaction triggered on store " + regionInfo
        + "; time since last major compaction " + (now - lowTimestamp) + "ms");
      result = true;
    }
  }
  return result;
}
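Beyond TTL and staleness, this variant also forces a major compaction to restore HDFS block locality. A sketch of what such a locality index measures (our own arithmetic; HBase computes it from HDFSBlocksDistribution):

// Sketch of a block-locality index: the fraction of a file's HDFS block
// bytes hosted on this regionserver. Below the configured
// comConf.getMinLocalityToForceCompact() threshold, a major compaction
// rewrites the file so its blocks land locally again.
final class LocalitySketch {
  static float localityIndex(long localBytes, long totalBytes) {
    return totalBytes == 0 ? 0f : (float) localBytes / totalBytes;
  }

  static boolean shouldForceCompact(float localityIndex, float minLocality) {
    return localityIndex < minLocality;
  }
}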
Project: PyroDB    File: RatioBasedCompactionPolicy.java
/**
 * @param candidateFiles candidate files, ordered from oldest to newest. All files in store.
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  // If we can't have all files, we cannot do major anyway
  boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
  if (!(forceMajor && isAllFiles)) {
    candidateSelection = skipLargeFiles(candidateSelection);
    isAllFiles = candidateFiles.size() == candidateSelection.size();
  }

  // Try a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested as a major compaction
  boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction)
      || (((forceMajor && isAllFiles) || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
  // Or, if there are any references among the candidates.
  boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);
  if (!isTryingMajor && !isAfterSplit) {
    // We're not compacting all files; let's see which files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, isTryingMajor);
  // Now we have the final file list, so we can determine if we can do major/all files.
  isAllFiles = (candidateFiles.size() == candidateSelection.size());
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !isAllFiles && mayUseOffPeak);
  result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);
  return result;
}
Project: PyroDB    File: RatioBasedCompactionPolicy.java
public boolean isMajorCompaction(final Collection<StoreFile> filesToCompact)
    throws IOException {
  boolean result = false;
  long mcTime = getNextMajorCompactTime(filesToCompact);
  if (filesToCompact == null || filesToCompact.isEmpty() || mcTime == 0) {
    return result;
  }
  // TODO: Use better method for determining stamp of last major (HBASE-2990)
  long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
  long now = System.currentTimeMillis();
  if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
    // Major compaction time has elapsed.
    long cfTtl = this.storeConfigInfo.getStoreFileTtl();
    if (filesToCompact.size() == 1) {
      // Single file
      StoreFile sf = filesToCompact.iterator().next();
      Long minTimestamp = sf.getMinimumTimestamp();
      long oldest = (minTimestamp == null)
          ? Long.MIN_VALUE
          : now - minTimestamp.longValue();
      if (sf.isMajorCompaction() &&
          (cfTtl == HConstants.FOREVER || oldest < cfTtl)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Skipping major compaction of " + this +
              " because one (major) compacted file only and oldestTime " +
              oldest + "ms is < ttl=" + cfTtl);
        }
      } else if (cfTtl != HConstants.FOREVER && oldest > cfTtl) {
        LOG.debug("Major compaction triggered on store " + this +
          ", because keyvalues outdated; time since last major compaction " +
          (now - lowTimestamp) + "ms");
        result = true;
      }
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Major compaction triggered on store " + this +
            "; time since last major compaction " + (now - lowTimestamp) + "ms");
      }
      result = true;
    }
  }
  return result;
}
Project: c5    File: RatioBasedCompactionPolicy.java
/**
 * @param candidateFiles candidate files, ordered from oldest to newest
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  long cfTtl = this.storeConfigInfo.getStoreFileTtl();
  if (!forceMajor) {
    // If there are expired files, only select them so that compaction deletes them
    if (comConf.shouldDeleteExpired() && (cfTtl != Long.MAX_VALUE)) {
      ArrayList<StoreFile> expiredSelection = selectExpiredStoreFiles(
          candidateSelection, EnvironmentEdgeManager.currentTimeMillis() - cfTtl);
      if (expiredSelection != null) {
        return new CompactionRequest(expiredSelection);
      }
    }
    candidateSelection = skipLargeFiles(candidateSelection);
  }

  // Force a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested
  // as a major compaction.
  // Or, if there are any references among the candidates.
  boolean majorCompaction = (
    (forceMajor && isUserCompaction)
    || ((forceMajor || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()))
    || StoreUtils.hasReferences(candidateSelection)
    );

  if (!majorCompaction) {
    // we're doing a minor compaction, let's see what files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, majorCompaction);
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !majorCompaction && mayUseOffPeak);
  return result;
}
Project: c5    File: RatioBasedCompactionPolicy.java
public boolean isMajorCompaction(final Collection<StoreFile> filesToCompact)
    throws IOException {
  boolean result = false;
  long mcTime = getNextMajorCompactTime(filesToCompact);
  if (filesToCompact == null || filesToCompact.isEmpty() || mcTime == 0) {
    return result;
  }
  // TODO: Use better method for determining stamp of last major (HBASE-2990)
  long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
  long now = System.currentTimeMillis();
  if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
    // Major compaction time has elapsed.
    long cfTtl = this.storeConfigInfo.getStoreFileTtl();
    if (filesToCompact.size() == 1) {
      // Single file
      StoreFile sf = filesToCompact.iterator().next();
      Long minTimestamp = sf.getMinimumTimestamp();
      long oldest = (minTimestamp == null)
          ? Long.MIN_VALUE
          : now - minTimestamp.longValue();
      if (sf.isMajorCompaction() &&
          (cfTtl == HConstants.FOREVER || oldest < cfTtl)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Skipping major compaction of " + this +
              " because one (major) compacted file only and oldestTime " +
              oldest + "ms is < ttl=" + cfTtl);
        }
      } else if (cfTtl != HConstants.FOREVER && oldest > cfTtl) {
        LOG.debug("Major compaction triggered on store " + this +
          ", because keyvalues outdated; time since last major compaction " +
          (now - lowTimestamp) + "ms");
        result = true;
      }
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Major compaction triggered on store " + this +
            "; time since last major compaction " + (now - lowTimestamp) + "ms");
      }
      result = true;
    }
  }
  return result;
}
Project: DominoHBase    File: CompactionPolicy.java
public boolean isMajorCompaction(final List<StoreFile> filesToCompact)
    throws IOException {
  boolean result = false;
  long mcTime = getNextMajorCompactTime(filesToCompact);
  if (filesToCompact == null || filesToCompact.isEmpty() || mcTime == 0) {
    return result;
  }
  // TODO: Use better method for determining stamp of last major (HBASE-2990)
  long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
  long now = System.currentTimeMillis();
  if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
    // Major compaction time has elapsed.
    long cfTtl = this.storeConfig.getStoreFileTtl();
    if (filesToCompact.size() == 1) {
      // Single file
      StoreFile sf = filesToCompact.get(0);
      Long minTimestamp = sf.getMinimumTimestamp();
      long oldest = (minTimestamp == null)
          ? Long.MIN_VALUE
          : now - minTimestamp.longValue();
      if (sf.isMajorCompaction() &&
          (cfTtl == HConstants.FOREVER || oldest < cfTtl)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Skipping major compaction of " + this +
              " because one (major) compacted file only and oldestTime " +
              oldest + "ms is < ttl=" + cfTtl);
        }
      } else if (cfTtl != HConstants.FOREVER && oldest > cfTtl) {
        LOG.debug("Major compaction triggered on store " + this +
          ", because keyvalues outdated; time since last major compaction " +
          (now - lowTimestamp) + "ms");
        result = true;
      }
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Major compaction triggered on store " + this +
            "; time since last major compaction " + (now - lowTimestamp) + "ms");
      }
      result = true;
    }
  }
  return result;
}