Example usages of the Java class org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter
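The snippets below are collected from several HBase forks (ditb, pbase, HIndex, PyroDB) and from Apache hbase itself. Together they show how StripeMultiFileWriter and its two concrete flavors, BoundaryMultiWriter and SizeMultiWriter, are constructed, fed cells, and finally committed or aborted by the stripe compactor and the stripe store flusher.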

Project: ditb    File: StripeCompactor.java
public List<Path> compact(CompactionRequest request, List<byte[]> targetBoundaries,
    byte[] majorRangeFromRow, byte[] majorRangeToRow,
    CompactionThroughputController throughputController, User user) throws IOException {
  if (LOG.isDebugEnabled()) {
    StringBuilder sb = new StringBuilder();
    sb.append("Executing compaction with " + targetBoundaries.size() + " boundaries:");
    for (byte[] tb : targetBoundaries) {
      sb.append(" [").append(Bytes.toString(tb)).append("]");
    }
    LOG.debug(sb.toString());
  }
  StripeMultiFileWriter writer = new StripeMultiFileWriter.BoundaryMultiWriter(
      targetBoundaries, majorRangeFromRow, majorRangeToRow);
  return compactInternal(writer, request, majorRangeFromRow, majorRangeToRow,
    throughputController, user);
}
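All the pre-2.0 forks in this listing drive the writer through the same lifecycle. The sketch below distills it; factory, comparator, cells, maxSeqId and isMajor are assumed to come from the surrounding store code, as in the snippets above and below.

// Lifecycle sketch (pre-2.0 API, as used by ditb/pbase/HIndex/PyroDB);
// factory, comparator, cells, maxSeqId and isMajor are assumed inputs.
StripeMultiFileWriter mw = new StripeMultiFileWriter.BoundaryMultiWriter(
    targetBoundaries, majorRangeFromRow, majorRangeToRow);
mw.init(null, factory, comparator);   // a null StoreScanner is accepted
for (KeyValue kv : cells) {
  mw.append(kv);                      // routed to the stripe covering kv's row
}
// Success path: seal every per-stripe file and collect the new paths.
List<Path> newFiles = mw.commitWriters(maxSeqId, isMajor);
// Failure path: the caller instead deletes every path returned by mw.abortWriters().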
Project: ditb    File: TestStripeCompactionPolicy.java    (verbatim identical in pbase, HIndex, and PyroDB)
/** Verify arbitrary flush. */
protected void verifyFlush(StripeCompactionPolicy policy, StripeInformationProvider si,
    KeyValue[] input, KeyValue[][] expected, byte[][] boundaries) throws IOException {
  StoreFileWritersCapture writers = new StoreFileWritersCapture();
  StripeStoreFlusher.StripeFlushRequest req = policy.selectFlush(si, input.length);
  StripeMultiFileWriter mw = req.createWriter();
  mw.init(null, writers, new KeyValue.KVComparator());
  for (KeyValue kv : input) {
    mw.append(kv);
  }
  boolean hasMetadata = boundaries != null;
  mw.commitWriters(0, false);
  writers.verifyKvs(expected, true, hasMetadata);
  if (hasMetadata) {
    writers.verifyBoundaries(boundaries);
  }
}
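A hypothetical call into this helper, assuming a policy and a StripeInformationProvider prepared elsewhere in the test, plus an invented kv(row) helper that builds a KeyValue for a row:

// Hypothetical usage; policy, si and kv() are assumed test fixtures.
KeyValue[] input = { kv("a"), kv("c"), kv("x") };
KeyValue[][] expected = { { kv("a") }, { kv("c"), kv("x") } };    // one group per stripe
byte[][] boundaries = { OPEN_KEY, Bytes.toBytes("b"), OPEN_KEY }; // two stripes split at "b"
verifyFlush(policy, si, input, expected, boundaries);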
Project: hbase    File: StripeCompactor.java
public List<Path> compact(CompactionRequestImpl request, final List<byte[]> targetBoundaries,
    final byte[] majorRangeFromRow, final byte[] majorRangeToRow,
    ThroughputController throughputController, User user) throws IOException {
  if (LOG.isDebugEnabled()) {
    StringBuilder sb = new StringBuilder();
    sb.append("Executing compaction with " + targetBoundaries.size() + " boundaries:");
    for (byte[] tb : targetBoundaries) {
      sb.append(" [").append(Bytes.toString(tb)).append("]");
    }
    LOG.debug(sb.toString());
  }
  return compact(request, new StripeInternalScannerFactory(majorRangeFromRow, majorRangeToRow),
    new CellSinkFactory<StripeMultiFileWriter>() {

      @Override
      public StripeMultiFileWriter createWriter(InternalScanner scanner, FileDetails fd,
          boolean shouldDropBehind) throws IOException {
        StripeMultiFileWriter writer = new StripeMultiFileWriter.BoundaryMultiWriter(
            store.getComparator(), targetBoundaries, majorRangeFromRow, majorRangeToRow);
        initMultiWriter(writer, scanner, fd, shouldDropBehind);
        return writer;
      }
    }, throughputController, user);
}
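A hypothetical invocation of this 2.x entry point; the compactor and request are assumed to come from the surrounding StripeStoreEngine plumbing, and the boundary values are illustrative only:

// Hypothetical call; NoLimitThroughputController is the stock no-throttling
// controller and User.getCurrent() the current HBase user.
List<byte[]> boundaries = Arrays.asList(Bytes.toBytes("bbb"), Bytes.toBytes("mmm"));
List<Path> newFiles = compactor.compact(request, boundaries,
    null, null,   // no major-compaction row range
    NoLimitThroughputController.INSTANCE, User.getCurrent());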
Project: hbase    File: StripeCompactor.java
public List<Path> compact(CompactionRequestImpl request, final int targetCount, final long targetSize,
    final byte[] left, final byte[] right, byte[] majorRangeFromRow, byte[] majorRangeToRow,
    ThroughputController throughputController, User user) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      "Executing compaction with " + targetSize + " target file size, no more than " + targetCount
          + " files, in [" + Bytes.toString(left) + "] [" + Bytes.toString(right) + "] range");
  }
  return compact(request, new StripeInternalScannerFactory(majorRangeFromRow, majorRangeToRow),
    new CellSinkFactory<StripeMultiFileWriter>() {

      @Override
      public StripeMultiFileWriter createWriter(InternalScanner scanner, FileDetails fd,
          boolean shouldDropBehind) throws IOException {
        StripeMultiFileWriter writer = new StripeMultiFileWriter.SizeMultiWriter(
            store.getComparator(), targetCount, targetSize, left, right);
        initMultiWriter(writer, scanner, fd, shouldDropBehind);
        return writer;
      }
    }, throughputController, user);
}
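SizeMultiWriter is the size-driven counterpart to BoundaryMultiWriter: as the debug message states, it emits at most targetCount files of roughly targetSize each within the [left, right] row range. The flusher snippets further down use the degenerate form SizeMultiWriter(1, Long.MAX_VALUE, OPEN_KEY, OPEN_KEY) to produce a single file covering all keys.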
Project: hbase    File: TestStripeCompactionPolicy.java
/** Verify arbitrary flush. */
protected void verifyFlush(StripeCompactionPolicy policy, StripeInformationProvider si,
    KeyValue[] input, KeyValue[][] expected, byte[][] boundaries) throws IOException {
  StoreFileWritersCapture writers = new StoreFileWritersCapture();
  StripeStoreFlusher.StripeFlushRequest req = policy.selectFlush(CellComparatorImpl.COMPARATOR, si,
    input.length);
  StripeMultiFileWriter mw = req.createWriter();
  mw.init(null, writers);
  for (KeyValue kv : input) {
    mw.append(kv);
  }
  boolean hasMetadata = boundaries != null;
  mw.commitWriters(0, false);
  writers.verifyKvs(expected, true, hasMetadata);
  if (hasMetadata) {
    writers.verifyBoundaries(boundaries);
  }
}
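Note the API drift relative to the fork versions of this test above: in 2.x the comparator moves into selectFlush (CellComparatorImpl.COMPARATOR), and init() takes only the scanner and the sink, where the older code passed a KeyValue.KVComparator to init() itself.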
Project: ditb    File: StripeCompactor.java
public List<Path> compact(CompactionRequest request, int targetCount, long targetSize,
    byte[] left, byte[] right, byte[] majorRangeFromRow, byte[] majorRangeToRow,
    CompactionThroughputController throughputController, User user) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Executing compaction with " + targetSize
        + " target file size, no more than " + targetCount + " files, in ["
        + Bytes.toString(left) + "] [" + Bytes.toString(right) + "] range");
  }
  StripeMultiFileWriter writer = new StripeMultiFileWriter.SizeMultiWriter(
      targetCount, targetSize, left, right);
  return compactInternal(writer, request, majorRangeFromRow, majorRangeToRow,
    throughputController, user);
}
Project: pbase    File: StripeCompactor.java    (identical in HIndex and PyroDB)
public List<Path> compact(CompactionRequest request, List<byte[]> targetBoundaries,
    byte[] majorRangeFromRow, byte[] majorRangeToRow) throws IOException {
  if (LOG.isDebugEnabled()) {
    StringBuilder sb = new StringBuilder();
    sb.append("Executing compaction with " + targetBoundaries.size() + " boundaries:");
    for (byte[] tb : targetBoundaries) {
      sb.append(" [").append(Bytes.toString(tb)).append("]");
    }
    LOG.debug(sb.toString());
  }
  StripeMultiFileWriter writer = new StripeMultiFileWriter.BoundaryMultiWriter(
      targetBoundaries, majorRangeFromRow, majorRangeToRow);
  return compactInternal(writer, request, majorRangeFromRow, majorRangeToRow);
}
Project: pbase    File: StripeCompactor.java    (identical in HIndex and PyroDB)
public List<Path> compact(CompactionRequest request, int targetCount, long targetSize,
    byte[] left, byte[] right, byte[] majorRangeFromRow, byte[] majorRangeToRow)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Executing compaction with " + targetSize
        + " target file size, no more than " + targetCount + " files, in ["
        + Bytes.toString(left) + "] [" + Bytes.toString(right) + "] range");
  }
  StripeMultiFileWriter writer = new StripeMultiFileWriter.SizeMultiWriter(
      targetCount, targetSize, left, right);
  return compactInternal(writer, request, majorRangeFromRow, majorRangeToRow);
}
Project: pbase    File: StripeStoreFlusher.java    (identical in HIndex and PyroDB)
private StripeMultiFileWriter.WriterFactory createWriterFactory(
    final TimeRangeTracker tracker, final long kvCount) {
  return new StripeMultiFileWriter.WriterFactory() {
    @Override
    public Writer createWriter() throws IOException {
      StoreFile.Writer writer = store.createWriterInTmp(
          kvCount, store.getFamily().getCompression(), false, true, true);
      writer.setTimeRangeTracker(tracker);
      return writer;
    }
  };
}
Project: pbase    File: StripeStoreFlusher.java    (identical in HIndex and PyroDB)
@VisibleForTesting
public StripeMultiFileWriter createWriter() throws IOException {
  StripeMultiFileWriter writer =
      new StripeMultiFileWriter.SizeMultiWriter(1, Long.MAX_VALUE, OPEN_KEY, OPEN_KEY);
  writer.setNoStripeMetadata();
  return writer;
}
Project: hbase    File: StripeCompactor.java
@Override
protected List<Path> commitWriter(StripeMultiFileWriter writer, FileDetails fd,
    CompactionRequestImpl request) throws IOException {
  List<Path> newFiles = writer.commitWriters(fd.maxSeqId, request.isMajor());
  assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata.";
  return newFiles;
}
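In 2.x the commit step has been factored out of compactInternal into this commitWriter override: the shared base class drives the scanner and the abort path, whereas the fork versions of compactInternal below inline both the commit and the cleanup.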
Project: pbase    File: StripeCompactor.java
private List<Path> compactInternal(StripeMultiFileWriter mw, CompactionRequest request,
    byte[] majorRangeFromRow, byte[] majorRangeToRow) throws IOException {
  final Collection<StoreFile> filesToCompact = request.getFiles();
  final FileDetails fd = getFileDetails(filesToCompact, request.isMajor());
  this.progress = new CompactionProgress(fd.maxKeyCount);

  long smallestReadPoint = getSmallestReadPoint();
  List<StoreFileScanner> scanners = createFileScanners(filesToCompact, smallestReadPoint);

  boolean finished = false;
  InternalScanner scanner = null;
  boolean cleanSeqId = false;
  try {
    // Get scanner to use.
    ScanType coprocScanType = ScanType.COMPACT_RETAIN_DELETES;
    scanner = preCreateCoprocScanner(request, coprocScanType, fd.earliestPutTs, scanners);
    if (scanner == null) {
      scanner = (majorRangeFromRow == null)
          ? createScanner(store, scanners,
              ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, fd.earliestPutTs)
          : createScanner(store, scanners,
              smallestReadPoint, fd.earliestPutTs, majorRangeFromRow, majorRangeToRow);
    }
    scanner = postCreateCoprocScanner(request, coprocScanType, scanner);
    if (scanner == null) {
      // NULL scanner returned from coprocessor hooks means skip normal processing.
      return new ArrayList<Path>();
    }

    // Create the writer factory for compactions.
    if (fd.minSeqIdToKeep > 0) {
      smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
      cleanSeqId = true;
    }

    final Compression.Algorithm compression = store.getFamily().getCompactionCompression();
    StripeMultiFileWriter.WriterFactory factory = new StripeMultiFileWriter.WriterFactory() {
      @Override
      public Writer createWriter() throws IOException {
        return store.createWriterInTmp(
            fd.maxKeyCount, compression, true, true, fd.maxTagsLength > 0);
      }
    };

    // Prepare multi-writer, and perform the compaction using scanner and writer.
    // It is ok here if storeScanner is null.
    StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null;
    mw.init(storeScanner, factory, store.getComparator());
    finished = performCompaction(scanner, mw, smallestReadPoint, cleanSeqId);
    if (!finished) {
      throw new InterruptedIOException("Aborting compaction of store " + store +
          " in region " + store.getRegionInfo().getRegionNameAsString() +
          " because it was interrupted.");
    }
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (Throwable t) {
        // Don't fail the compaction if this fails.
        LOG.error("Failed to close scanner after compaction.", t);
      }
    }
    if (!finished) {
      for (Path leftoverFile : mw.abortWriters()) {
        try {
          store.getFileSystem().delete(leftoverFile, false);
        } catch (Exception ex) {
          LOG.error("Failed to delete the leftover file after an unfinished compaction.", ex);
        }
      }
    }
  }

  assert finished : "We should have exited the method on all error paths";
  List<Path> newFiles = mw.commitWriters(fd.maxSeqId, request.isMajor());
  assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata.";
  return newFiles;
}
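The error-handling contract of compactInternal reduces to the pattern below (a sketch, not the actual method): either the pass finishes cleanly and commitWriters() seals the outputs, or abortWriters() hands back the partially written files for deletion.

// Commit-or-abort sketch; all names are taken from compactInternal above.
boolean finished = false;
try {
  mw.init(storeScanner, factory, store.getComparator());
  finished = performCompaction(scanner, mw, smallestReadPoint, cleanSeqId);
} finally {
  if (!finished) {
    for (Path leftover : mw.abortWriters()) {
      store.getFileSystem().delete(leftover, false); // best-effort cleanup
    }
  }
}
List<Path> newFiles = mw.commitWriters(fd.maxSeqId, request.isMajor());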
Project: pbase    File: StripeStoreFlusher.java
@Override
public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum,
    MonitoredTask status) throws IOException {
  List<Path> result = new ArrayList<Path>();
  int cellsCount = snapshot.getCellsCount();
  if (cellsCount == 0) return result; // don't flush if there are no entries

  long smallestReadPoint = store.getSmallestReadPoint();
  InternalScanner scanner = createScanner(snapshot.getScanner(), smallestReadPoint);
  if (scanner == null) {
    return result; // NULL scanner returned from coprocessor hooks means skip normal processing
  }

  // Let policy select flush method.
  StripeFlushRequest req = this.policy.selectFlush(this.stripes, cellsCount);

  boolean success = false;
  StripeMultiFileWriter mw = null;
  try {
    mw = req.createWriter(); // Writer according to the policy.
    StripeMultiFileWriter.WriterFactory factory = createWriterFactory(
        snapshot.getTimeRangeTracker(), cellsCount);
    StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null;
    mw.init(storeScanner, factory, store.getComparator());

    synchronized (flushLock) {
      performFlush(scanner, mw, smallestReadPoint);
      result = mw.commitWriters(cacheFlushSeqNum, false);
      success = true;
    }
  } finally {
    if (!success && (mw != null)) {
      for (Path leftoverFile : mw.abortWriters()) {
        try {
          store.getFileSystem().delete(leftoverFile, false);
        } catch (Exception e) {
          LOG.error("Failed to delete a file after failed flush: " + e);
        }
      }
    }
    try {
      scanner.close();
    } catch (IOException ex) {
      LOG.warn("Failed to close flush scanner, ignoring", ex);
    }
  }
  return result;
}
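The flush path mirrors the compaction path: the policy picks a writer via createWriter(), init() wires it to the factory and the comparator, performFlush() streams the snapshot through it under flushLock, and commitWriters() or abortWriters() finishes or cleans up.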
Project: pbase    File: StripeStoreFlusher.java    (identical in HIndex and PyroDB)
@Override
public StripeMultiFileWriter createWriter() throws IOException {
  return new StripeMultiFileWriter.BoundaryMultiWriter(targetBoundaries, null, null);
}
Project: pbase    File: StripeStoreFlusher.java    (identical in HIndex and PyroDB)
@Override
public StripeMultiFileWriter createWriter() throws IOException {
  return new StripeMultiFileWriter.SizeMultiWriter(
      this.targetCount, this.targetKvs, OPEN_KEY, OPEN_KEY);
}
Project: HIndex    File: StripeCompactor.java    (identical in PyroDB)
private List<Path> compactInternal(StripeMultiFileWriter mw, CompactionRequest request,
    byte[] majorRangeFromRow, byte[] majorRangeToRow) throws IOException {
  final Collection<StoreFile> filesToCompact = request.getFiles();
  final FileDetails fd = getFileDetails(filesToCompact, request.isMajor());
  this.progress = new CompactionProgress(fd.maxKeyCount);

  long smallestReadPoint = getSmallestReadPoint();
  List<StoreFileScanner> scanners = createFileScanners(filesToCompact, smallestReadPoint);

  boolean finished = false;
  InternalScanner scanner = null;
  try {
    // Get scanner to use.
    ScanType coprocScanType = ScanType.COMPACT_RETAIN_DELETES;
    scanner = preCreateCoprocScanner(request, coprocScanType, fd.earliestPutTs, scanners);
    if (scanner == null) {
      scanner = (majorRangeFromRow == null)
          ? createScanner(store, scanners,
              ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, fd.earliestPutTs)
          : createScanner(store, scanners,
              smallestReadPoint, fd.earliestPutTs, majorRangeFromRow, majorRangeToRow);
    }
    scanner = postCreateCoprocScanner(request, coprocScanType, scanner);
    if (scanner == null) {
      // NULL scanner returned from coprocessor hooks means skip normal processing.
      return new ArrayList<Path>();
    }

    // Create the writer factory for compactions.
    final boolean needMvcc = fd.maxMVCCReadpoint >= smallestReadPoint;
    final Compression.Algorithm compression = store.getFamily().getCompactionCompression();
    StripeMultiFileWriter.WriterFactory factory = new StripeMultiFileWriter.WriterFactory() {
      @Override
      public Writer createWriter() throws IOException {
        return store.createWriterInTmp(
            fd.maxKeyCount, compression, true, needMvcc, fd.maxTagsLength > 0);
      }
    };

    // Prepare multi-writer, and perform the compaction using scanner and writer.
    // It is ok here if storeScanner is null.
    StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null;
    mw.init(storeScanner, factory, store.getComparator());
    finished = performCompaction(scanner, mw, smallestReadPoint);
    if (!finished) {
      throw new InterruptedIOException("Aborting compaction of store " + store +
          " in region " + store.getRegionInfo().getRegionNameAsString() +
          " because it was interrupted.");
    }
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (Throwable t) {
        // Don't fail the compaction if this fails.
        LOG.error("Failed to close scanner after compaction.", t);
      }
    }
    if (!finished) {
      for (Path leftoverFile : mw.abortWriters()) {
        try {
          store.getFileSystem().delete(leftoverFile, false);
        } catch (Exception ex) {
          LOG.error("Failed to delete the leftover file after an unfinished compaction.", ex);
        }
      }
    }
  }

  assert finished : "We should have exited the method on all error paths";
  List<Path> newFiles = mw.commitWriters(fd.maxSeqId, request.isMajor());
  assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata.";
  return newFiles;
}
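Unlike the pbase compactInternal above, which lowers smallestReadPoint to fd.minSeqIdToKeep and passes a cleanSeqId flag into performCompaction, this variant decides up front whether MVCC read points must be preserved (fd.maxMVCCReadpoint >= smallestReadPoint) and feeds that flag into createWriterInTmp.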
Project: HIndex    File: StripeStoreFlusher.java
@Override
public List<Path> flushSnapshot(SortedSet<KeyValue> snapshot, long cacheFlushSeqNum,
    final TimeRangeTracker tracker, AtomicLong flushedSize, MonitoredTask status)
        throws IOException {
  int kvCount = snapshot.size();
  if (kvCount == 0) return Collections.emptyList(); // don't flush if there are no entries

  long smallestReadPoint = store.getSmallestReadPoint();
  InternalScanner scanner = createScanner(snapshot, smallestReadPoint);
  if (scanner == null) {
    return Collections.emptyList(); // NULL scanner returned from coprocessor hooks means skip normal processing
  }

  // Let policy select flush method.
  StripeFlushRequest req = this.policy.selectFlush(this.stripes, kvCount);

  long flushedBytes = 0;
  boolean success = false;
  List<Path> result = null;
  StripeMultiFileWriter mw = null;
  try {
    mw = req.createWriter(); // Writer according to the policy.
    StripeMultiFileWriter.WriterFactory factory = createWriterFactory(tracker, kvCount);
    StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null;
    mw.init(storeScanner, factory, store.getComparator());

    synchronized (flushLock) {
      flushedBytes = performFlush(scanner, mw, smallestReadPoint);
      result = mw.commitWriters(cacheFlushSeqNum, false);
      success = true;
    }
  } finally {
    if (!success && (mw != null)) {
      if (result == null) {
        result = Collections.emptyList();
      } else {
        result.clear();
      }
      for (Path leftoverFile : mw.abortWriters()) {
        try {
          store.getFileSystem().delete(leftoverFile, false);
        } catch (Exception e) {
          LOG.error("Failed to delete a file after failed flush: " + e);
        }
      }
    }
    flushedSize.set(flushedBytes);
    try {
      scanner.close();
    } catch (IOException ex) {
      LOG.warn("Failed to close flush scanner, ignoring", ex);
    }
  }
  return result;
}
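This is the oldest flush signature in the listing: it receives the raw SortedSet&lt;KeyValue&gt; snapshot and reports the flushed byte count through an AtomicLong, where the pbase version above and the PyroDB version below take a MemStoreSnapshot and return only the resulting paths.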
Project: PyroDB    File: StripeStoreFlusher.java
@Override
public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum,
    MonitoredTask status) throws IOException {
  List<Path> result = null;
  int cellsCount = snapshot.getCellsCount();
  if (cellsCount == 0) return result; // don't flush if there are no entries

  long smallestReadPoint = store.getSmallestReadPoint();
  InternalScanner scanner = createScanner(snapshot.getScanner(), smallestReadPoint);
  if (scanner == null) {
    return result; // NULL scanner returned from coprocessor hooks means skip normal processing
  }

  // Let policy select flush method.
  StripeFlushRequest req = this.policy.selectFlush(this.stripes, cellsCount);

  boolean success = false;
  StripeMultiFileWriter mw = null;
  try {
    mw = req.createWriter(); // Writer according to the policy.
    StripeMultiFileWriter.WriterFactory factory = createWriterFactory(
        snapshot.getTimeRangeTracker(), cellsCount);
    StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null;
    mw.init(storeScanner, factory, store.getComparator());

    synchronized (flushLock) {
      performFlush(scanner, mw, smallestReadPoint);
      result = mw.commitWriters(cacheFlushSeqNum, false);
      success = true;
    }
  } finally {
    if (!success && (mw != null)) {
      if (result != null) {
        result.clear();
      }
      for (Path leftoverFile : mw.abortWriters()) {
        try {
          store.getFileSystem().delete(leftoverFile, false);
        } catch (Exception e) {
          LOG.error("Failed to delete a file after failed flush: " + e);
        }
      }
    }
    try {
      scanner.close();
    } catch (IOException ex) {
      LOG.warn("Failed to close flush scanner, ignoring", ex);
    }
  }
  return result;
}
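One subtle difference from the pbase version: here result starts as null, so a zero-cell flush or a coprocessor-skipped scanner returns null rather than an empty list, and callers must be prepared for both.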