Usage examples for the Java class org.apache.hadoop.hbase.client.IsolationLevel
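org.apache.hadoop.hbase.client.IsolationLevel is the client-side enum that controls whether a read sees only committed data (READ_COMMITTED, the default, bound to the region's MVCC read point) or may also return writes that are still in flight (READ_UNCOMMITTED, used below by snapshot scanners and test code to bypass the read point). The snippets that follow are taken verbatim from several HBase forks. For orientation, here is a minimal client-side sketch, not from any of the projects below, assuming a hypothetical table test_table with a column family cf:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class IsolationLevelExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("test_table"))) {
      Scan scan = new Scan();
      // READ_UNCOMMITTED: the scanner is not pinned to the MVCC read point,
      // so it may also observe writes that have not yet been committed.
      scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
      scan.addFamily(Bytes.toBytes("cf"));
      try (ResultScanner results = table.getScanner(scan)) {
        for (Result r : results) {
          System.out.println(Bytes.toString(r.getRow()));
        }
      }

      Get get = new Get(Bytes.toBytes("row1"));
      // READ_COMMITTED is the default; setting it explicitly is shown only
      // to contrast with the scan above.
      get.setIsolationLevel(IsolationLevel.READ_COMMITTED);
      Result result = table.get(get);
    }
  }
}
```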

Project: ditb    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);


  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
Project: ditb    File: StoreScanner.java
private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
  this(store, scan, scanInfo, null,
      ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED), false);
  if (dropDeletesFromRow == null) {
    matcher =
        new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint, earliestPutTs,
            oldestUnexpiredTS, now, store.getCoprocessorHost());
  } else {
    matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
        oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
  }

  // Filter the list of scanners using Bloom filters, time range, TTL, etc.
  scanners = selectScannersFrom(scanners);

  // Seek all scanners to the initial key
  seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);

  // Combine all seeked scanners with a heap
  resetKVHeap(scanners, store.getComparator());
}
Project: ditb    File: TestCoprocessorScanPolicy.java
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
        family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
Project: pbase    File: StoreScanner.java
private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
                     List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
                     long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
    this(store, false, scan, null, scanInfo.getTtl(), scanInfo.getMinVersions(),
            ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
    if (dropDeletesFromRow == null) {
        matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint,
                earliestPutTs, oldestUnexpiredTS, now, store.getCoprocessorHost());
    } else {
        matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
                oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
    }

    // Filter the list of scanners using Bloom filters, time range, TTL, etc.
    scanners = selectScannersFrom(scanners);

    // Seek all scanners to the initial key
    seekScanners(scanners, matcher.getStartKey(), false, isParallelSeekEnabled);

    // Combine all seeked scanners with a heap
    resetKVHeap(scanners, store.getComparator());
}
Project: pbase    File: TestCoprocessorScanPolicy.java
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
Project: HIndex    File: StoreScanner.java
private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
  this(store, false, scan, null, scanInfo.getTtl(), scanInfo.getMinVersions(),
      ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  if (dropDeletesFromRow == null) {
    matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint,
        earliestPutTs, oldestUnexpiredTS, store.getCoprocessorHost());
  } else {
    matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
        oldestUnexpiredTS, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
  }

  // Filter the list of scanners using Bloom filters, time range, TTL, etc.
  scanners = selectScannersFrom(scanners);

  // Seek all scanners to the initial key
  seekScanners(scanners, matcher.getStartKey(), false, isParallelSeekEnabled);

  // Combine all seeked scanners with a heap
  resetKVHeap(scanners, store.getComparator());
}
Project: hbase    File: TestAtomicOperation.java
private void assertICV(byte [] row,
                       byte [] familiy,
                       byte[] qualifier,
                       long amount,
                       boolean fast) throws IOException {
  // run a get and see?
  Get get = new Get(row);
  if (fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  get.addColumn(familiy, qualifier);
  Result result = region.get(get);
  assertEquals(1, result.size());

  Cell kv = result.rawCells()[0];
  long r = Bytes.toLong(CellUtil.cloneValue(kv));
  assertEquals(amount, r);
}
Project: hbase    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  TableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);


  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);
  scan.setScanMetricsEnabled(true);

  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
Project: PyroDB    File: StoreScanner.java
private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
  this(store, false, scan, null, scanInfo.getTtl(), scanInfo.getMinVersions(),
      ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  if (dropDeletesFromRow == null) {
    matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint,
        earliestPutTs, oldestUnexpiredTS, store.getCoprocessorHost());
  } else {
    matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
        oldestUnexpiredTS, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
  }

  // Filter the list of scanners using Bloom filters, time range, TTL, etc.
  scanners = selectScannersFrom(scanners);

  // Seek all scanners to the initial key
  seekScanners(scanners, matcher.getStartKey(), false, isParallelSeekEnabled);

  // Combine all seeked scanners with a heap
  resetKVHeap(scanners, store.getComparator());
}
Project: PyroDB    File: TestCoprocessorScanPolicy.java
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
Project: spliceengine    File: SkeletonClientSideRegionScanner.java
public SkeletonClientSideRegionScanner(Configuration conf,
                                          FileSystem fs,
                                          Path rootDir,
                                          HTableDescriptor htd,
                                          HRegionInfo hri,
                                          Scan scan, String hostAndPort) throws IOException {
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "init for regionInfo=%s, scan=%s", hri, scan);
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    this.conf = conf;
    this.fs = fs;
    this.rootDir = rootDir;
    this.htd = htd;
    this.hri = new SpliceHRegionInfo(hri);
    this.scan = scan;
    this.hostAndPort = hostAndPort;
}
Project: ditb    File: HRegion.java
@Override public long getReadpoint(IsolationLevel isolationLevel) {
  if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
    // This scan can read even uncommitted transactions
    return Long.MAX_VALUE;
  }
  return mvcc.getReadPoint();
}
Project: ditb    File: HRegion.java
/**
 * Do a specific Get on passed <code>columnFamily</code> and column qualifiers from
 * <code>incrementCoordinates</code> only.
 *
 * @param increment
 * @param columnFamily
 * @param increments
 * @return Return the Cells to Increment
 * @throws IOException
 */
private List<Cell> getIncrementCurrentValue(final Increment increment, byte[] columnFamily,
    final List<Cell> increments, final IsolationLevel isolation) throws IOException {
  Get get = new Get(increment.getRow());
  if (isolation != null) get.setIsolationLevel(isolation);
  for (Cell cell : increments) {
    get.addColumn(columnFamily, CellUtil.cloneQualifier(cell));
  }
  TimeRange tr = increment.getTimeRange();
  if (tr != null) {
    get.setTimeRange(tr.getMin(), tr.getMax());
  }
  return get(get, false);
}
Project: ditb    File: TestRowProcessorEndpoint.java
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
Project: ditb    File: TestRegionObserverScannerOpenHook.java
@Override
public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
    throws IOException {
  scan.setFilter(new NoDataFilter());
  return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: ditb    File: ZooKeeperScanPolicyObserver.java
@Override
public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
    final KeyValueScanner s) throws IOException {
  ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
  if (scanInfo == null) {
    // take default action
    return null;
  }
  return new StoreScanner(store, scanInfo, scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: pbase    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);

  Path tmpRootDir = new Path(conf.get(RESTORE_DIR_KEY)); // This is the user specified root
  // directory where snapshot was restored

  // create scan
  // TODO: mapred does not support scan as input API. Work around for now.
  if (conf.get(TableInputFormat.SCAN) != null) {
    scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN));
  } else if (conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST) != null) {
    String[] columns =
      conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" ");
    scan = new Scan();
    for (String col : columns) {
      scan.addFamily(Bytes.toBytes(col));
    }
  } else {
    throw new IllegalArgumentException("A Scan is not configured for this job");
  }

  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner = new ClientSideRegionScanner(conf, fs, tmpRootDir, htd, hri, scan, null);
}
Project: pbase    File: HRegion.java
public long getReadpoint(IsolationLevel isolationLevel) {
    if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
        // This scan can read even uncommitted transactions
        return Long.MAX_VALUE;
    }
    return mvcc.memstoreReadPoint();
}
Project: pbase    File: TestRowProcessorEndpoint.java
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
Project: pbase    File: TestRegionObserverScannerOpenHook.java
@Override
public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
    throws IOException {
  scan.setFilter(new NoDataFilter());
  return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: pbase    File: ZooKeeperScanPolicyObserver.java
@Override
public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
    final KeyValueScanner s) throws IOException {
  ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
  if (scanInfo == null) {
    // take default action
    return null;
  }
  return new StoreScanner(store, scanInfo, scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: HIndex    File: HRegion.java
public long getReadpoint(IsolationLevel isolationLevel) {
  if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
    // This scan can read even uncommitted transactions
    return Long.MAX_VALUE;
  }
  return mvcc.memstoreReadPoint();
}
Project: HIndex    File: TestRowProcessorEndpoint.java
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
Project: HIndex    File: TestRegionObserverScannerOpenHook.java
@Override
public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
    throws IOException {
  scan.setFilter(new NoDataFilter());
  return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: HIndex    File: TestCoprocessorScanPolicy.java
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  return new StoreScanner(store, scanInfo, scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: HIndex    File: ZooKeeperScanPolicyObserver.java
@Override
public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
    final KeyValueScanner s) throws IOException {
  ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
  if (scanInfo == null) {
    // take default action
    return null;
  }
  return new StoreScanner(store, scanInfo, scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: hbase    File: TestRowProcessorEndpoint.java
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
Project: hbase    File: HRegion.java
/**
 * @return readpoint considering given IsolationLevel. Pass {@code null} for default
 */
public long getReadPoint(IsolationLevel isolationLevel) {
  if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
    // This scan can read even uncommitted transactions
    return Long.MAX_VALUE;
  }
  return mvcc.getReadPoint();
}
Project: hbase    File: HRegion.java
RegionScannerImpl(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region,
    long nonceGroup, long nonce) throws IOException {
  this.region = region;
  this.maxResultSize = scan.getMaxResultSize();
  if (scan.hasFilter()) {
    this.filter = new FilterWrapper(scan.getFilter());
  } else {
    this.filter = null;
  }
  this.comparator = region.getCellComparator();
  /**
   * By default, calls to next/nextRaw must enforce the batch limit. Thus, construct a default
   * scanner context that can be used to enforce the batch limit in the event that a
   * ScannerContext is not specified during an invocation of next/nextRaw
   */
  defaultScannerContext = ScannerContext.newBuilder()
      .setBatchLimit(scan.getBatch()).build();
  this.stopRow = scan.getStopRow();
  this.includeStopRow = scan.includeStopRow();

  // synchronize on scannerReadPoints so that nobody calculates
  // getSmallestReadPoint, before scannerReadPoints is updated.
  IsolationLevel isolationLevel = scan.getIsolationLevel();
  long mvccReadPoint = PackagePrivateFieldAccessor.getMvccReadPoint(scan);
  synchronized (scannerReadPoints) {
    if (mvccReadPoint > 0) {
      this.readPt = mvccReadPoint;
    } else if (nonce == HConstants.NO_NONCE || rsServices == null
        || rsServices.getNonceManager() == null) {
      this.readPt = getReadPoint(isolationLevel);
    } else {
      this.readPt = rsServices.getNonceManager().getMvccFromOperationContext(nonceGroup, nonce);
    }
    scannerReadPoints.put(this, this.readPt);
  }
  initializeScanners(scan, additionalScanners);
}
Project: PyroDB    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);

  Path tmpRootDir = new Path(conf.get(TABLE_DIR_KEY)); // This is the user specified root
  // directory where snapshot was restored

  // create scan
  // TODO: mapred does not support scan as input API. Work around for now.
  if (conf.get(TableInputFormat.SCAN) != null) {
    scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN));
  } else if (conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST) != null) {
    String[] columns =
      conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" ");
    scan = new Scan();
    for (String col : columns) {
      scan.addFamily(Bytes.toBytes(col));
    }
  } else {
    throw new IllegalArgumentException("A Scan is not configured for this job");
  }

  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner = new ClientSideRegionScanner(conf, fs, tmpRootDir, htd, hri, scan, null);
}
Project: PyroDB    File: HRegion.java
public long getReadpoint(IsolationLevel isolationLevel) {
  if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
    // This scan can read even uncommitted transactions
    return Long.MAX_VALUE;
  }
  return mvcc.memstoreReadPoint();
}
Project: PyroDB    File: TestRowProcessorEndpoint.java
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
Project: PyroDB    File: TestRegionObserverScannerOpenHook.java
@Override
public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
    throws IOException {
  scan.setFilter(new NoDataFilter());
  return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: PyroDB    File: ZooKeeperScanPolicyObserver.java
@Override
public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
    final KeyValueScanner s) throws IOException {
  ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
  if (scanInfo == null) {
    // take default action
    return null;
  }
  return new StoreScanner(store, scanInfo, scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
Project: c5    File: TestRowProcessorEndpoint.java
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
Project: DominoHBase    File: TestRowProcessorEndpoint.java
public static void doScan(
    HRegion region, Scan scan, List<KeyValue> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
Project: ditb    File: Region.java
/** @return readpoint considering given IsolationLevel */
long getReadpoint(IsolationLevel isolationLevel);
Project: ditb    File: HRegion.java
/**
 * Apply increments to a column family.
 *
 * @param sortedIncrements The passed in increments to apply MUST be sorted so that they match the
 *                         order that they appear in the Get results (get results will be sorted on return).
 *                         Otherwise, we won't be able to find the existing values if the cells are not specified
 *                         in order by the client since cells are in an array list.
 * @return Resulting increments after <code>sortedIncrements</code> have been applied to current
 * values (if any -- else passed increment is the final result).
 * @throws IOException
 * @param isolation Isolation level to use when running the 'get'. Pass null for default.
 */
private List<Cell> applyIncrementsToColumnFamily(Increment increment, byte[] columnFamilyName,
    List<Cell> sortedIncrements, long now, long mvccNum, List<Cell> allKVs,
    final IsolationLevel isolation) throws IOException {
  List<Cell> results = new ArrayList<Cell>(sortedIncrements.size());
  byte[] row = increment.getRow();
  // Get previous values for all columns in this family
  List<Cell> currentValues =
      getIncrementCurrentValue(increment, columnFamilyName, sortedIncrements, isolation);
  // Iterate the input columns and update existing values if they were found,
  // otherwise
  // add new column initialized to the increment amount
  int idx = 0;
  for (int i = 0; i < sortedIncrements.size(); i++) {
    Cell inc = sortedIncrements.get(i);
    long incrementAmount = getLongValue(inc);
    // If increment amount == 0, then don't write this Increment to the WAL.
    boolean writeBack = (incrementAmount != 0);
    // Carry forward any tags that might have been added by a coprocessor.
    List<Tag> tags = Tag.carryForwardTags(inc);

    Cell currentValue = null;
    long ts = now;
    if (idx < currentValues.size() && CellUtil.matchingQualifier(currentValues.get(idx), inc)) {
      currentValue = currentValues.get(idx);
      ts = Math.max(now, currentValue.getTimestamp());
      incrementAmount += getLongValue(currentValue);
      // Carry forward all tags
      tags = Tag.carryForwardTags(tags, currentValue);
      if (i < (sortedIncrements.size() - 1) && !CellUtil
          .matchingQualifier(inc, sortedIncrements.get(i + 1))) idx++;
    }

    // Append new incremented KeyValue to list
    byte[] qualifier = CellUtil.cloneQualifier(inc);
    byte[] incrementAmountInBytes = Bytes.toBytes(incrementAmount);
    tags = carryForwardTTLTag(tags, increment);

    Cell newValue =
        new KeyValue(row, 0, row.length, columnFamilyName, 0, columnFamilyName.length, qualifier,
            0, qualifier.length, ts, KeyValue.Type.Put, incrementAmountInBytes, 0,
            incrementAmountInBytes.length, tags);

    // Don't set an mvcc if none specified. The mvcc may be assigned later in
    // case where we
    // write the memstore AFTER we sync our edit to the log.
    if (mvccNum != MultiVersionConcurrencyControl.NO_WRITE_NUMBER) {
      CellUtil.setSequenceId(newValue, mvccNum);
    }

    // Give coprocessors a chance to update the new cell
    if (coprocessorHost != null) {
      newValue = coprocessorHost
          .postMutationBeforeWAL(RegionObserver.MutationType.INCREMENT, increment, currentValue,
              newValue);
    }
    allKVs.add(newValue);
    if (writeBack) {
      results.add(newValue);
    }
  }
  return results;
}
Project: AbacusUtil    File: AnyScan.java
public IsolationLevel getIsolationLevel() {
    return scan.getIsolationLevel();
}
Project: AbacusUtil    File: AnyScan.java
public AnyScan setIsolationLevel(IsolationLevel level) {
    scan.setIsolationLevel(level);

    return this;
}