Java class org.apache.hadoop.hbase.regionserver.StoreFile example source code

Project: ditb    File: HFileArchiver.java
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * fewer resources, but is limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. Deletion is attempted for every file before
 *           the exception is thrown, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      LOG.error("Failed to delete store file:" + hsf.getPath());
      errors.add(e);
    }
  }
  if (errors.size() > 0) {
    throw MultipleIOException.createIOException(errors);
  }
}
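
The collect-then-throw pattern above is worth calling out: every delete is attempted before any failure is reported, and the individual IOExceptions are bundled at the end via MultipleIOException. A minimal, self-contained sketch of the same best-effort pattern outside HBase (the FailingTask interface and runAll helper are hypothetical names used only for illustration):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class BestEffortRunner {
  // Hypothetical stand-in for "delete one store file".
  interface FailingTask {
    void run() throws IOException;
  }

  // Attempt every task, collect failures, and throw once at the end.
  static void runAll(List<FailingTask> tasks) throws IOException {
    List<IOException> errors = new ArrayList<>();
    for (FailingTask task : tasks) {
      try {
        task.run();
      } catch (IOException e) {
        errors.add(e); // remember the failure, but keep going
      }
    }
    if (!errors.isEmpty()) {
      // HBase aggregates with MultipleIOException.createIOException(errors);
      // plain suppressed exceptions give the same effect here.
      IOException combined = new IOException(errors.size() + " task(s) failed");
      for (IOException e : errors) {
        combined.addSuppressed(e);
      }
      throw combined;
    }
  }
}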
Project: ditb    File: ExplicitFileListGenerator.java
@Override
public final Iterator<List<StoreFile>> iterator() {
  return new Iterator<List<StoreFile>>() {
    private int nextIndex = 0;
    @Override
    public boolean hasNext() {
      return nextIndex < fileSizes.length;
    }

    @Override
    public List<StoreFile> next() {
      List<StoreFile> files = createStoreFileList(fileSizes[nextIndex]);
      nextIndex += 1;
      return files;
    }

    @Override
    public void remove() {
    }
  };
}
Project: ditb    File: ExploringCompactionPolicy.java
/**
 * Check that all files satisfy the constraint
 *      FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) ) * Ratio.
 *
 * @param files List of store files to consider as a compaction candidate.
 * @param currentRatio The ratio to use.
 * @return whether these files satisfy the ratio constraint.
 */
private boolean filesInRatio(final List<StoreFile> files, final double currentRatio) {
  if (files.size() < 2) {
    return true;
  }

  long totalFileSize = getTotalStoreSize(files);

  for (StoreFile file : files) {
    long singleFileSize = file.getReader().length();
    long sumAllOtherFileSizes = totalFileSize - singleFileSize;

    if (singleFileSize > sumAllOtherFileSizes * currentRatio) {
      return false;
    }
  }
  return true;
}
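
A quick worked example of the constraint from the javadoc: with file sizes {100, 50, 25} and ratio 1.2, the largest file fails because 100 > (50 + 25) * 1.2 = 90, so the whole set is rejected; shrink it to 40 and every file passes. A standalone sketch of the same check on raw sizes instead of StoreFile (illustrative only):

public class RatioCheckDemo {
  // Same constraint as filesInRatio above, on plain longs.
  static boolean filesInRatio(long[] sizes, double ratio) {
    if (sizes.length < 2) {
      return true;
    }
    long total = 0;
    for (long s : sizes) {
      total += s;
    }
    for (long s : sizes) {
      if (s > (total - s) * ratio) {
        return false; // one file dominates the rest of the selection
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(filesInRatio(new long[] {100, 50, 25}, 1.2)); // false: 100 > 90
    System.out.println(filesInRatio(new long[] {40, 50, 25}, 1.2));  // true
  }
}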
Project: ditb    File: DefaultCompactor.java
private CompactJobQueue.CompactJob createCompactJob(final CompactionRequest request,
    final Path writtenPath, HStore store) throws IOException {
  // check for reference files; they are not supported yet, so fall back to a rebuild job
  boolean needToRebuild = false;
  for (StoreFile sf : request.getFiles()) {
    if (sf.getPath().getName().indexOf(".") != -1 || sf.isReference()) {
      needToRebuild = true;
      break;
    }
  }
  CompactJobQueue.CompactJob job;
  if (needToRebuild) {
    job = new CompactJobQueue.RebuildCompactJob(store, request, writtenPath);
  } else {
    job = new CompactJobQueue.NormalCompactJob(store, request, writtenPath);
  }
  return job;
}
Project: ditb    File: FIFOCompactionPolicy.java
@Override
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    List<StoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {

  if(forceMajor){
    LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag.");
  }
  boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate selection to the parent policy.");
    return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, 
      mayUseOffPeak, forceMajor);
  }

  // Select only the expired store files; the resulting request may be empty (nothing to compact)
  Collection<StoreFile> toCompact = getExpiredStores(candidateFiles, filesCompacting);
  CompactionRequest result = new CompactionRequest(toCompact);
  return result;
}
Project: ditb    File: FIFOCompactionPolicy.java
private boolean hasExpiredStores(Collection<StoreFile> files) {
  long currentTime = EnvironmentEdgeManager.currentTime();
  for(StoreFile sf: files){
    // The MIN_VERSIONS check happens in HStore#removeUnneededFiles
    Long maxTs = sf.getReader().getMaxTimestamp();
    long maxTtl = storeConfigInfo.getStoreFileTtl();
    if(maxTs == null 
        || maxTtl == Long.MAX_VALUE
        || (currentTime - maxTtl < maxTs)){
      continue; 
    } else{
      return true;
    }
  }
  return false;
}
Project: ditb    File: FIFOCompactionPolicy.java
private Collection<StoreFile> getExpiredStores(Collection<StoreFile> files,
  Collection<StoreFile> filesCompacting) {
  long currentTime = EnvironmentEdgeManager.currentTime();
  Collection<StoreFile> expiredStores = new ArrayList<StoreFile>();    
  for(StoreFile sf: files){
    // The MIN_VERSIONS check happens in HStore#removeUnneededFiles
    Long maxTs = sf.getReader().getMaxTimestamp();
    long maxTtl = storeConfigInfo.getStoreFileTtl();
    if(maxTs == null 
        || maxTtl == Long.MAX_VALUE
        || (currentTime - maxTtl < maxTs)){
      continue; 
    } else if (filesCompacting == null || !filesCompacting.contains(sf)) {
      expiredStores.add(sf);
    }
  }
  return expiredStores;
}
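
Both FIFO methods share the same expiry test: a file is expired only when its newest timestamp plus the store TTL lies at or before the current time; a missing max timestamp or an unlimited TTL (Long.MAX_VALUE) never expires. A hedged standalone sketch of just that predicate:

public class FifoExpiryDemo {
  // Standalone version of the expiry predicate used in the FIFO policy above.
  static boolean isExpired(Long maxTs, long maxTtl, long currentTime) {
    if (maxTs == null || maxTtl == Long.MAX_VALUE) {
      return false; // unknown timestamp or unlimited TTL: never expires
    }
    return currentTime - maxTtl >= maxTs;
  }

  public static void main(String[] args) {
    long now = 10_000L;
    System.out.println(isExpired(4_000L, 5_000L, now)); // true: 4000 + 5000 <= 10000
    System.out.println(isExpired(6_000L, 5_000L, now)); // false: still within its TTL
  }
}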
Project: ditb    File: ConstantSizeFileListGenerator.java
@Override
public final Iterator<List<StoreFile>> iterator() {
  return new Iterator<List<StoreFile>>() {
    private int count = 0;

    @Override
    public boolean hasNext() {
      return count < MAX_FILE_GEN_ITERS;
    }

    @Override
    public List<StoreFile> next() {
      count += 1;
      ArrayList<StoreFile> files = new ArrayList<StoreFile>(NUM_FILES_GEN);
      for (int i = 0; i < NUM_FILES_GEN; i++) {
        files.add(createMockStoreFile(FILESIZE));
      }
      return files;
    }

    @Override
    public void remove() {

    }
  };
}
Project: ditb    File: RatioBasedCompactionPolicy.java
/**
 * @param candidates pre-filtered candidate files
 * @return filtered subset
 * take up to maxFilesToCompact from the start
 */
private ArrayList<StoreFile> removeExcessFiles(ArrayList<StoreFile> candidates,
    boolean isUserCompaction, boolean isMajorCompaction) {
  int excess = candidates.size() - comConf.getMaxFilesToCompact();
  if (excess > 0) {
    if (isMajorCompaction && isUserCompaction) {
      LOG.debug("Warning, compacting more than " + comConf.getMaxFilesToCompact() +
          " files because of a user-requested major compaction");
    } else {
      LOG.debug("Too many admissible files. Excluding " + excess
        + " files from compaction candidates");
      candidates.subList(comConf.getMaxFilesToCompact(), candidates.size()).clear();
    }
  }
  return candidates;
}
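
The trimming relies on the subList(...).clear() idiom: clearing a subList view removes that range from the backing list in place. For example, 12 candidates with maxFilesToCompact = 10 give excess = 2, and subList(10, 12).clear() keeps only the first ten. A minimal sketch:

import java.util.ArrayList;
import java.util.List;

public class TrimTailDemo {
  public static void main(String[] args) {
    List<Integer> candidates = new ArrayList<>();
    for (int i = 0; i < 12; i++) {
      candidates.add(i);
    }
    int maxFilesToCompact = 10;
    int excess = candidates.size() - maxFilesToCompact;
    if (excess > 0) {
      // Clearing the subList view removes the tail from the backing list.
      candidates.subList(maxFilesToCompact, candidates.size()).clear();
    }
    System.out.println(candidates.size()); // 10
  }
}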
Project: ditb    File: RatioBasedCompactionPolicy.java
/**
 * @param filesToCompact files under consideration for the next major compaction
 * @return When to run next major compaction
 */
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        // Synchronized to ensure one user of random instance at a time.
        double rnd = -1;
        synchronized (this) {
          this.random.setSeed(seed);
          rnd = this.random.nextDouble();
        }
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // If seed is null, then no storefiles == no major compaction
      }
    }
  }
  return ret;
}
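
With the defaults noted in the comments (24 h period, 20% jitter), jitter = round(24 h * 0.2) = 4.8 h, and ret + jitter - round(2 * jitter * rnd) spreads the next compaction time across roughly 24 h +/- 4.8 h. Because the seed is derived from the store files, the offset is stable across restarts. A hedged sketch of the same arithmetic with an explicit seed (names here are illustrative, not the HBase API):

import java.util.Random;

public class CompactionJitterDemo {
  // Same jitter arithmetic as getNextMajorCompactTime, with the seed passed in.
  static long nextMajorCompactTime(long periodMs, double jitterPct, long seed) {
    long ret = periodMs;
    if (ret > 0 && jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      double rnd = new Random(seed).nextDouble();
      ret += jitter - Math.round(2L * jitter * rnd);
    }
    return ret;
  }

  public static void main(String[] args) {
    long day = 24L * 60 * 60 * 1000;
    // Same seed, same schedule: this is what prevents a compaction storm on restart.
    System.out.println(nextMajorCompactTime(day, 0.2, 42L));
    System.out.println(nextMajorCompactTime(day, 0.2, 42L)); // identical value
  }
}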
Project: ditb    File: StripeCompactionPolicy.java
private Pair<Long, Integer> estimateTargetKvs(Collection<StoreFile> files, double splitCount) {
  // If the size is larger than what we target, we don't want to split into proportionally
  // larger parts and then have to split again very soon. So, we will increase the multiplier
  // by one until we get small enough parts. E.g. 5Gb stripe that should have been split into
  // 2 parts when it was 3Gb will be split into 3x1.67Gb parts, rather than 2x2.5Gb parts.
  long totalSize = getTotalFileSize(files);
  long targetPartSize = config.getSplitPartSize();
  assert targetPartSize > 0 && splitCount > 0;
  double ratio = totalSize / (splitCount * targetPartSize); // ratio of real to desired size
  while (ratio > 1.0) {
    // Ratio of real to desired size if we increase the multiplier.
    double newRatio = totalSize / ((splitCount + 1.0) * targetPartSize);
    if ((1.0 / newRatio) >= ratio) break; // New ratio is < 1.0, but further than the last one.
    ratio = newRatio;
    splitCount += 1.0;
  }
  long kvCount = (long)(getTotalKvCount(files) / splitCount);
  return new Pair<Long, Integer>(kvCount, (int)Math.ceil(splitCount));
}
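
Replaying the example from the comment: a 5 GB stripe with a 1.5 GB target part size starts at splitCount = 2 (ratio about 1.67), accepts splitCount = 3 (ratio about 1.11), then stops because a fourth part would undershoot (1 / newRatio = 1.2 >= 1.11), ending with 3 parts of about 1.67 GB each. A standalone replay of just the loop:

public class SplitCountDemo {
  // The same split-count loop as estimateTargetKvs, on plain numbers.
  static double chooseSplitCount(long totalSize, long targetPartSize, double splitCount) {
    double ratio = totalSize / (splitCount * targetPartSize);
    while (ratio > 1.0) {
      double newRatio = totalSize / ((splitCount + 1.0) * targetPartSize);
      if ((1.0 / newRatio) >= ratio) {
        break; // the next step would undershoot 1.0 by more than we currently overshoot it
      }
      ratio = newRatio;
      splitCount += 1.0;
    }
    return splitCount;
  }

  public static void main(String[] args) {
    long gb = 1L << 30;
    System.out.println(chooseSplitCount(5 * gb, (long) (1.5 * gb), 2.0)); // 3.0
  }
}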
Project: ditb    File: LMDIndexDirectStoreFileScanner.java
private List<byte[]> initRowKeyList(FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  // init
  StoreFile bucketStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpBucketFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFile secondaryStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpSecondaryFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFileScanner bucketScanner = getStoreFileScanner(bucketStoreFile);
  StoreFileScanner secondaryScanner = getStoreFileScanner(secondaryStoreFile);
  // get hit buckets
  MDRange[] ranges = getRanges(indexFamilyMap, rangeList);
  List<LMDBucket> bucketList = getBucketRanges(bucketScanner, ranges);
  // scan rowkeys based on the buckets
  List<byte[]> rowkeyList = getRawRowkeyList(secondaryScanner, bucketList, ranges);
  // deinit
  bucketScanner.close();
  bucketStoreFile.closeReader(true);
  secondaryScanner.close();
  secondaryStoreFile.closeReader(true);
  return rowkeyList;
}
Project: ditb    File: PerfTestCompactionPolicies.java
@Test
public final void testSelection() throws Exception {
  long fileDiff = 0;
  for (List<StoreFile> storeFileList : generator) {
    List<StoreFile> currentFiles = new ArrayList<StoreFile>(18);
    for (StoreFile file : storeFileList) {
      currentFiles.add(file);
      currentFiles = runIteration(currentFiles);
    }
    fileDiff += (storeFileList.size() - currentFiles.size());
  }

  // Print tab-delimited output so it can be pasted into Excel / Google Docs.
  System.out.println(
          cp.getClass().getSimpleName()
          + "\t" + fileGenClass.getSimpleName()
          + "\t" + max
          + "\t" + min
          + "\t" + ratio
          + "\t" + written
          + "\t" + fileDiff
  );
}
Project: ditb    File: TestStripeCompactionPolicy.java
@Test
public void testWithReferences() throws Exception {
  StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
  StripeCompactor sc = mock(StripeCompactor.class);
  StoreFile ref = createFile();
  when(ref.isReference()).thenReturn(true);
  StripeInformationProvider si = mock(StripeInformationProvider.class);
  Collection<StoreFile> sfs = al(ref, createFile());
  when(si.getStorefiles()).thenReturn(sfs);

  assertTrue(policy.needsCompactions(si, al()));
  StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
  assertEquals(si.getStorefiles(), scr.getRequest().getFiles());
  scr.execute(sc, NoLimitCompactionThroughputController.INSTANCE, null);
  verify(sc, only()).compact(eq(scr.getRequest()), anyInt(), anyLong(), aryEq(OPEN_KEY),
    aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY),
    any(NoLimitCompactionThroughputController.class), any(User.class));
}
Project: ditb    File: PerfTestCompactionPolicies.java
private List<StoreFile> runIteration(List<StoreFile> startingStoreFiles) throws IOException {

    List<StoreFile> storeFiles = new ArrayList<StoreFile>(startingStoreFiles);
    CompactionRequest req = cp.selectCompaction(
        storeFiles, new ArrayList<StoreFile>(), false, false, false);
    long newFileSize = 0;

    Collection<StoreFile> filesToCompact = req.getFiles();

    if (!filesToCompact.isEmpty()) {

      storeFiles = new ArrayList<StoreFile>(storeFiles);
      storeFiles.removeAll(filesToCompact);

      for (StoreFile storeFile : filesToCompact) {
        newFileSize += storeFile.getReader().length();
      }

      storeFiles.add(createMockStoreFileBytes(newFileSize));
    }

    written += newFileSize;
    return storeFiles;
  }
Project: ditb    File: SnapshotManifest.java
/**
 * Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
 * This is used by the "online snapshot" when the table is enabled.
 */
public void addRegion(final HRegion region) throws IOException {
  // 0. Get the ManifestBuilder/RegionVisitor
  RegionVisitor visitor = createRegionVisitor(desc);

  // 1. dump region meta info into the snapshot directory
  LOG.debug("Storing '" + region + "' region-info for snapshot.");
  Object regionData = visitor.regionOpen(region.getRegionInfo());
  monitor.rethrowException();

  // 2. iterate through all the stores in the region
  LOG.debug("Creating references for hfiles");

  for (Store store : region.getStores()) {
    // 2.1. build the snapshot reference for the store
    Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
    monitor.rethrowException();

    List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding snapshot references for " + storeFiles  + " hfiles");
    }

    // 2.2. iterate through all the store's files and create "references".
    for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
      StoreFile storeFile = storeFiles.get(i);
      monitor.rethrowException();

      // create "reference" to this store file.
      LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath());
      visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
    }
    visitor.familyClose(regionData, familyData);
  }
  visitor.regionClose(regionData);
}
Project: ditb    File: Compactor.java
/**
 * Creates file scanners for compaction.
 *
 * @param filesToCompact Files.
 * @return Scanners.
 */
protected List<StoreFileScanner> createFileScanners(final Collection<StoreFile> filesToCompact,
    long smallestReadPoint, boolean useDropBehind) throws IOException {
  return StoreFileScanner.getScannersForStoreFiles(filesToCompact,
  /* cache blocks = */false,
  /* use pread = */false,
  /* is compaction */true,
  /* use Drop Behind */useDropBehind, smallestReadPoint);
}
Project: ditb    File: CompactionRequest.java
/**
 * Combines this request with another request. Coprocessors subclassing CompactionRequest may override
 * this if they want to do clever things based on CompactionPolicy selection that
 * is passed to this method via "other". The default implementation just does a copy.
 * @param other Request to combine with.
 * @return The result (may be "this" or "other").
 */
public CompactionRequest combineWith(CompactionRequest other) {
  this.filesToCompact = new ArrayList<StoreFile>(other.getFiles());
  this.isOffPeak = other.isOffPeak;
  this.isMajor = other.isMajor;
  this.priority = other.priority;
  this.selectionTime = other.selectionTime;
  this.timeInNanos = other.timeInNanos;
  this.regionName = other.regionName;
  this.storeName = other.storeName;
  this.totalSize = other.totalSize;
  return this;
}
Project: ditb    File: TestStripeCompactionPolicy.java
/**
 * Verify arbitrary compaction.
 * @param policy Policy to test.
 * @param si Stripe information pre-set with stripes to test.
 * @param sfs Files that should be compacted.
 * @param dropDeletes Whether to drop deletes from compaction range.
 * @param count Expected # of resulting stripes, null if not checked.
 * @param size Expected target stripe size, null if not checked.
 * @param start Left boundary of the compaction.
 * @param end Right boundary of the compaction.
 */
private void verifyCompaction(StripeCompactionPolicy policy, StripeInformationProvider si,
    Collection<StoreFile> sfs, Boolean dropDeletes, Integer count, Long size,
    byte[] start, byte[] end, boolean needsCompaction) throws IOException {
  StripeCompactor sc = mock(StripeCompactor.class);
  assertTrue(!needsCompaction || policy.needsCompactions(si, al()));
  StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
  verifyCollectionsEqual(sfs, scr.getRequest().getFiles());
  scr.execute(sc, NoLimitCompactionThroughputController.INSTANCE, null);
  verify(sc, times(1)).compact(eq(scr.getRequest()),
    count == null ? anyInt() : eq(count.intValue()),
    size == null ? anyLong() : eq(size.longValue()), aryEq(start), aryEq(end),
    dropDeletesMatcher(dropDeletes, start), dropDeletesMatcher(dropDeletes, end),
    any(NoLimitCompactionThroughputController.class), any(User.class));
}
Project: ditb    File: MockStoreFileGenerator.java
protected List<StoreFile> createStoreFileList(final int[] fs) {
  List<StoreFile> storeFiles = new LinkedList<StoreFile>();
  for (int fileSize : fs) {
    storeFiles.add(createMockStoreFile(fileSize));
  }
  return storeFiles;
}
Project: ditb    File: ExploringCompactionPolicy.java
/**
 * Find the total size of a list of store files.
 * @param potentialMatchFiles StoreFile list.
 * @return Sum of StoreFile.getReader().length() over the list.
 */
private long getTotalStoreSize(final List<StoreFile> potentialMatchFiles) {
  long size = 0;

  for (StoreFile s:potentialMatchFiles) {
    size += s.getReader().length();
  }
  return size;
}
Project: ditb    File: FIFOCompactionPolicy.java
@Override
public boolean isMajorCompaction(Collection<StoreFile> filesToCompact) throws IOException {
  boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.isMajorCompaction(filesToCompact);
  }
  return false;
}
Project: ditb    File: FIFOCompactionPolicy.java
@Override
public boolean needsCompaction(Collection<StoreFile> storeFiles, 
    List<StoreFile> filesCompacting) {  
  boolean isAfterSplit = StoreUtils.hasReferences(storeFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.needsCompaction(storeFiles, filesCompacting);
  }
  return hasExpiredStores(storeFiles);
}
Project: ditb    File: RatioBasedCompactionPolicy.java
/**
 * @param candidates pre-filtered candidate files
 * @return filtered subset
 * exclude all files above maxCompactSize
 * Also save all references. We MUST compact them.
 */
private ArrayList<StoreFile> skipLargeFiles(ArrayList<StoreFile> candidates, 
  boolean mayUseOffpeak) {
  int pos = 0;
  while (pos < candidates.size() && !candidates.get(pos).isReference()
    && (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak))) {
    ++pos;
  }
  if (pos > 0) {
    LOG.debug("Some files are too large. Excluding " + pos
        + " files from compaction candidates");
    candidates.subList(0, pos).clear();
  }
  return candidates;
}
Project: ditb    File: RatioBasedCompactionPolicy.java
/**
 * @param candidates pre-filtered candidate files
 * @return filtered subset
 * exclude all bulk load files if configured
 */
private ArrayList<StoreFile> filterBulk(ArrayList<StoreFile> candidates) {
  candidates.removeAll(Collections2.filter(candidates,
      new Predicate<StoreFile>() {
        @Override
        public boolean apply(StoreFile input) {
          return input.excludeFromMinorCompaction();
        }
      }));
  return candidates;
}
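
The Guava Predicate above predates Java 8; on Java 8+ the same filter collapses to a single removeIf call. A hedged equivalent sketch, assuming the same StoreFile.excludeFromMinorCompaction() accessor:

// Java 8+ equivalent of the Guava-based filterBulk above.
private ArrayList<StoreFile> filterBulk(ArrayList<StoreFile> candidates) {
  // Drop every candidate flagged to be excluded from minor compactions.
  candidates.removeIf(StoreFile::excludeFromMinorCompaction);
  return candidates;
}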
Project: ditb    File: TestStripeCompactionPolicy.java
private static StoreFile createFile(long size) throws Exception {
  StoreFile sf = mock(StoreFile.class);
  when(sf.getPath()).thenReturn(new Path("moo"));
  StoreFile.Reader r = mock(StoreFile.Reader.class);
  when(r.getEntries()).thenReturn(size);
  when(r.length()).thenReturn(size);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong())).thenReturn(
    mock(StoreFileScanner.class));
  when(sf.getReader()).thenReturn(r);
  when(sf.createReader(anyBoolean())).thenReturn(r);
  when(sf.createReader()).thenReturn(r);
  return sf;
}
Project: ditb    File: StripeCompactionPolicy.java
public List<StoreFile> preSelectFilesForCoprocessor(StripeInformationProvider si,
    List<StoreFile> filesCompacting) {
  // We sincerely hope nobody is messing with us with their coprocessors.
  // If they do, they are very likely to shoot themselves in the foot.
  // We'll just exclude all the filesCompacting from the list.
  ArrayList<StoreFile> candidateFiles = new ArrayList<StoreFile>(si.getStorefiles());
  candidateFiles.removeAll(filesCompacting);
  return candidateFiles;
}
Project: ditb    File: StripeCompactionPolicy.java
public boolean needsCompactions(StripeInformationProvider si, List<StoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
Project: ditb    File: StripeCompactionPolicy.java
/**
 * @param si StoreFileManager.
 * @return Whether any stripe potentially needs compaction.
 */
protected boolean needsSingleStripeCompaction(StripeInformationProvider si) {
  int minFiles = this.config.getStripeCompactMinFiles();
  for (List<StoreFile> stripe : si.getStripes()) {
    if (stripe.size() >= minFiles) return true;
  }
  return false;
}
Project: ditb    File: StripeCompactionPolicy.java
/**
 * Selects the compaction of a single stripe using default policy.
 * @param sfs Files.
 * @param allFilesOnly Whether a compaction of all-or-none files is needed.
 * @return The resulting selection.
 */
private List<StoreFile> selectSimpleCompaction(
    List<StoreFile> sfs, boolean allFilesOnly, boolean isOffpeak) {
  int minFilesLocal = Math.max(
      allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles());
  int maxFilesLocal = Math.max(this.config.getStripeCompactMaxFiles(), minFilesLocal);
  return stripePolicy.applyCompactionPolicy(sfs, false, isOffpeak, minFilesLocal, maxFilesLocal);
}
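
The two Math.max calls form a clamp: an all-files request may not select fewer files than the stripe holds, and the upper bound is lifted to match. For example, with 7 stripe files, stripeCompactMinFiles = 3 and stripeCompactMaxFiles = 5, an all-files request yields minFilesLocal = 7 and maxFilesLocal = 7. A worked sketch of just that arithmetic:

public class ClampDemo {
  public static void main(String[] args) {
    int stripeFiles = 7, configMin = 3, configMax = 5;
    boolean allFilesOnly = true;
    int minFilesLocal = Math.max(allFilesOnly ? stripeFiles : 0, configMin); // 7
    int maxFilesLocal = Math.max(configMax, minFilesLocal);                  // 7
    System.out.println(minFilesLocal + " " + maxFilesLocal);
  }
}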
Project: ditb    File: StripeCompactionPolicy.java
/**
 * Selects the compaction that compacts all files (to be removed later).
 * @param si StoreFileManager.
 * @param targetStripeCount Target stripe count.
 * @param targetSize Target stripe size.
 * @return The compaction.
 */
private StripeCompactionRequest selectCompactionOfAllFiles(StripeInformationProvider si,
    int targetStripeCount, long targetSize) {
  Collection<StoreFile> allFiles = si.getStorefiles();
  SplitStripeCompactionRequest request = new SplitStripeCompactionRequest(
      allFiles, OPEN_KEY, OPEN_KEY, targetStripeCount, targetSize);
  request.setMajorRangeFull();
  LOG.debug("Selecting a compaction that includes all " + allFiles.size() + " files");
  return request;
}
Project: ditb    File: StripeCompactionPolicy.java
private StripeCompactionRequest selectNewStripesCompaction(StripeInformationProvider si) {
  List<StoreFile> l0Files = si.getLevel0Files();
  Pair<Long, Integer> kvsAndCount = estimateTargetKvs(l0Files, config.getInitialCount());
  LOG.debug("Creating " + kvsAndCount.getSecond() + " initial stripes with "
      + kvsAndCount.getFirst() + " kvs each via L0 compaction of " + l0Files.size() + " files");
  SplitStripeCompactionRequest request = new SplitStripeCompactionRequest(
      si.getLevel0Files(), OPEN_KEY, OPEN_KEY, kvsAndCount.getSecond(), kvsAndCount.getFirst());
  request.setMajorRangeFull(); // L0 only, can drop deletes.
  return request;
}
Project: ditb    File: StripeCompactionPolicy.java
private static long getTotalKvCount(final Collection<StoreFile> candidates) {
  long totalSize = 0;
  for (StoreFile storeFile : candidates) {
    totalSize += storeFile.getReader().getEntries();
  }
  return totalSize;
}
Project: ditb    File: TestStripeCompactionPolicy.java
/**
 * This method actually does all the work.
 */
private static StripeInformationProvider createStripesWithFiles(List<byte[]> boundaries,
    List<List<StoreFile>> stripeFiles, List<StoreFile> l0Files) throws Exception {
  ArrayList<ImmutableList<StoreFile>> stripes = new ArrayList<ImmutableList<StoreFile>>();
  ArrayList<byte[]> boundariesList = new ArrayList<byte[]>();
  StripeInformationProvider si = mock(StripeInformationProvider.class);
  if (!stripeFiles.isEmpty()) {
    assert stripeFiles.size() == (boundaries.size() + 1);
    boundariesList.add(OPEN_KEY);
    for (int i = 0; i <= boundaries.size(); ++i) {
      byte[] startKey = ((i == 0) ? OPEN_KEY : boundaries.get(i - 1));
      byte[] endKey = ((i == boundaries.size()) ? OPEN_KEY : boundaries.get(i));
      boundariesList.add(endKey);
      for (StoreFile sf : stripeFiles.get(i)) {
        setFileStripe(sf, startKey, endKey);
      }
      stripes.add(ImmutableList.copyOf(stripeFiles.get(i)));
      when(si.getStartRow(eq(i))).thenReturn(startKey);
      when(si.getEndRow(eq(i))).thenReturn(endKey);
    }
  }
  ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<StoreFile>();
  sfs.addAllSublists(stripes);
  sfs.addSublist(l0Files);
  when(si.getStorefiles()).thenReturn(sfs);
  when(si.getStripes()).thenReturn(stripes);
  when(si.getStripeBoundaries()).thenReturn(boundariesList);
  when(si.getStripeCount()).thenReturn(stripes.size());
  when(si.getLevel0Files()).thenReturn(l0Files);
  return si;
}
Project: ditb    File: LCIndexWriter.java
private StoreFile.Writer createIndexWriter(byte[] family, byte[] qualifier, int maxKeyCount)
    throws IOException {
  Path iFilePath = indexParameters.getTmpIFilePath(hdfsTmpPath, qualifier);
  StoreFile.Writer writer = store
      .createTmpIFileWriter(maxKeyCount, store.family.getCompression(), false, true, false, false,
          iFilePath);
  if (tracker != null) writer.setTimeRangeTracker(tracker);
  return writer;
}
Project: ditb    File: StoreFileIndexScanner.java
/**
 * Return an array of scanners corresponding to the given set of store files.
 */
public static List<StoreFileIndexScanner> getScannersForStoreFiles(Collection<StoreFile> files,
    boolean cacheBlocks, boolean usePread) throws IOException {
  List<StoreFileIndexScanner> scanners = new ArrayList<StoreFileIndexScanner>(files.size());
  for (StoreFile sf : files) {
    IndexReader ir = sf.getIndexReader();
    if (ir != null) {
      scanners.add(ir.getStoreFileIndexScanner(cacheBlocks, usePread));
    }
  }
  return scanners;
}
Project: ditb    File: ArchiveJobQueue.java
public ArchiveJob(HStore store, Collection<StoreFile> compactedFiles, Path hdfsPath,
    CompleteCompactionJob completeCompactionJob, boolean isMajor) {
  super(ArchiveJobQueue.getInstance().getJobQueueName());
  this.store = store;
  compactedHFiles = new ArrayList<>(compactedFiles.size());
  for (StoreFile sf : compactedFiles)
    compactedHFiles.add(sf.getPath());
  this.prevCompleteCompactJob = completeCompactionJob;
  this.isMajor = isMajor;
  hdfsPathGenerated = hdfsPath;
  printMessage(
      "ArchiveJob construction, compaction generates hdfs HFile: " + hdfsPathGenerated);
}
Project: ditb    File: CompactJobQueue.java
public RebuildCompactJob(HStore store, CompactionRequest request, Path writtenPath)
    throws IOException {
  super(store, request, writtenPath);
  StringBuilder sb = new StringBuilder();
  sb.append("RebuildCompactJob construction, hdfsPath: ").append(tmpHDFSPath);
  sb.append(", with ").append(request.getFiles().size())
      .append(" store files compacted, they are: ");
  for (StoreFile sf : request.getFiles()) {
    sb.append(sf.getPath()).append(", ");
  }
  printMessage(sb.toString());
}
Project: ditb    File: CompactJobQueue.java
private void winterTestingStoreFile(StoreFile sf) throws IOException {
  StoreFileScanner compactedFileScanner = sf.getReader().getStoreFileScanner(false, false);
  KeyValue startKey =
      KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP);
  compactedFileScanner.seek(startKey);
  KeyValue kv;
  int n = 0;
  while ((kv = (KeyValue) compactedFileScanner.next()) != null) {
    LOG.info("LCDBG, show kv: " + Bytes.toInt(kv.getRow()));
    ++n;
  }
  LOG.info("LCDBG, reader has: " + n + " in " + sf.getPath());
  compactedFileScanner.close();
}
Project: ditb    File: LMDIndexDirectStoreFileScanner.java
public LMDIndexDirectStoreFileScanner(StoreFile file, boolean canUseDrop, boolean cacheBlocks,
    boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, long readPt,
    boolean isPrimaryReplica, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList, FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, boolean rowkeyAsResult) throws IOException {
  this.canUseDrop = canUseDrop;
  this.cacheBlocks = cacheBlocks;
  this.usePread = usePread;
  this.isCompaction = isCompaction;
  this.matcher = matcher;
  this.readPt = readPt;
  this.isPrimaryReplica = isPrimaryReplica;
  this.file = file;
  this.rowkeyAsResult = rowkeyAsResult;
  try {
    rawRowkeyList = initRowKeyList(fileSystem, cacheConf, conf, indexFamilyMap, rangeList);
    System.out.println("get " + rawRowkeyList.size() + " secondary rowkeys from " + this.file
        + ", now rowkeyAsResult=" + rowkeyAsResult);
    if (!rowkeyAsResult) {
      Collections.sort(rawRowkeyList, Bytes.BYTES_COMPARATOR);
      rawDataScanner = getStoreFileScanner(file);
    }
  } catch (IOException e) {
    System.out.println("error in LMDIndexStoreFileScanner, " + e);
    throw e;
  }
}