Java Class org.apache.hadoop.hbase.client.ClientSideRegionScanner Example Source Code
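The snippets below come from TableSnapshotInputFormatImpl in several HBase-derived projects. Each initialize() builds a ClientSideRegionScanner over one region of a restored snapshot, reading the store files straight from the filesystem and bypassing the region servers; that is why the isolation level and block-cache settings are forced before the scanner is constructed.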

Project: ditb    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);

  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
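Each initialize() above ends by constructing a ClientSideRegionScanner, which the record reader then drives through the ResultScanner-style contract inherited from AbstractClientScanner. A minimal usage sketch, assuming conf, fs, restoreDir, htd, hri, and scan are prepared as in the snippet above; the "cf"/"q" family and qualifier are placeholders:

// Minimal sketch: iterate the restored region's rows, then release resources.
ClientSideRegionScanner scanner =
    new ClientSideRegionScanner(conf, fs, restoreDir, htd, hri, scan, null);
try {
  Result result;
  while ((result = scanner.next()) != null) {
    // process one row, e.g. read a single cell value
    byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
  }
} finally {
  scanner.close();
}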
Project: hbase    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  TableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);

  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);
  scan.setScanMetricsEnabled(true);

  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
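This variant additionally enables scan metrics. When enabled, the collected metrics can be read back from the scanner via getScanMetrics(), inherited from AbstractClientScanner and returning org.apache.hadoop.hbase.client.metrics.ScanMetrics. A sketch, assuming the scan has already run; the metric names in the map vary by HBase version:

// Sketch: print the scan metrics collected during the snapshot scan.
ScanMetrics metrics = scanner.getScanMetrics();
if (metrics != null) {
  for (Map.Entry<String, Long> e : metrics.getMetricsMap().entrySet()) {
    System.out.println(e.getKey() + " = " + e.getValue());
  }
}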
Project: pbase    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);

  Path tmpRootDir = new Path(conf.get(RESTORE_DIR_KEY)); // This is the user specified root
  // directory where snapshot was restored

  // create scan
  // TODO: mapred does not support scan as input API. Work around for now.
  if (conf.get(TableInputFormat.SCAN) != null) {
    scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN));
  } else if (conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST) != null) {
    String[] columns =
      conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" ");
    scan = new Scan();
    for (String col : columns) {
      scan.addFamily(Bytes.toBytes(col));
    }
  } else {
    throw new IllegalArgumentException("A Scan is not configured for this job");
  }

  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner = new ClientSideRegionScanner(conf, fs, tmpRootDir, htd, hri, scan, null);
}
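Unlike the ditb and hbase variants, this one takes the Scan from the job configuration rather than from the split. On the driver side the Scan is serialized with TableMapReduceUtil.convertScanToString, the inverse of the convertStringToScan call above. A sketch, where "cf" is a placeholder column family:

// Driver-side sketch: store a serialized Scan under TableInputFormat.SCAN
// so the initialize() above can deserialize it.
Scan scan = new Scan();
scan.addFamily(Bytes.toBytes("cf"));
conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(scan));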
Project: PyroDB    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);

  Path tmpRootDir = new Path(conf.get(TABLE_DIR_KEY)); // This is the user specified root
  // directory where snapshot was restored

  // create scan
  // TODO: mapred does not support scan as input API. Work around for now.
  if (conf.get(TableInputFormat.SCAN) != null) {
    scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN));
  } else if (conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST) != null) {
    String[] columns =
      conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" ");
    scan = new Scan();
    for (String col : columns) {
      scan.addFamily(Bytes.toBytes(col));
    }
  } else {
    throw new IllegalArgumentException("A Scan is not configured for this job");
  }

  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner = new ClientSideRegionScanner(conf, fs, tmpRootDir, htd, hri, scan, null);
}
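The PyroDB variant is identical to the pbase one except that the restore directory is read from TABLE_DIR_KEY rather than RESTORE_DIR_KEY. Its fallback branch serves the old mapred API, which configures a space-separated list of column families instead of a serialized Scan. A sketch with placeholder family names:

// Old-API sketch: the COLUMN_LIST branch above splits this string on spaces
// and adds each token as a column family to the Scan.
conf.set(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST, "cf1 cf2");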
Project: ditb    File: TableSnapshotInputFormatImpl.java
public ClientSideRegionScanner getScanner() {
  return scanner;
}
Project: pbase    File: TableSnapshotInputFormatImpl.java
public ClientSideRegionScanner getScanner() {
  return scanner;
}
Project: HIndex    File: TableSnapshotInputFormatImpl.java
public ClientSideRegionScanner getScanner() {
  return scanner;
}
Project: HIndex    File: TableSnapshotInputFormatImpl.java
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.split = split;
  String regionName = this.split.regionName;
  String snapshotName = getSnapshotName(conf);
  Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
  FileSystem fs = rootDir.getFileSystem(conf);

  Path tmpRootDir = new Path(conf.get(TABLE_DIR_KEY)); // This is the user specified root
  // directory where snapshot was restored

  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);

  //load table descriptor
  HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);

  //load region descriptor
  Path regionDir = new Path(snapshotDir, regionName);
  HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);

  // create scan
  // TODO: mapred does not support scan as input API. Work around for now.
  if (conf.get(TableInputFormat.SCAN) != null) {
    scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN));
  } else if (conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST) != null) {
    String[] columns =
      conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" ");
    scan = new Scan();
    for (String col : columns) {
      scan.addFamily(Bytes.toBytes(col));
    }
  } else {
    throw new IllegalArgumentException("A Scan is not configured for this job");
  }

  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner = new ClientSideRegionScanner(conf, fs, tmpRootDir, htd, hri, scan, null);
}
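This variant resolves the table and region descriptors itself from the completed-snapshot directory, but still scans out of the restore directory given by TABLE_DIR_KEY. That directory has to be populated before initialize() runs; a hedged sketch of that step, assuming RestoreSnapshotHelper.copySnapshotForScanner (the helper TableSnapshotInputFormatImpl uses for this), with hypothetical path and snapshot name:

// Sketch: materialize the snapshot's store-file references into restoreDir
// so the ClientSideRegionScanner can read them without a region server.
Path rootDir = FSUtils.getRootDir(conf);
Path restoreDir = new Path("/tmp/snapshot-restore");  // hypothetical location
RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir,
    "my_snapshot");                                   // hypothetical snapshot name
conf.set(TABLE_DIR_KEY, restoreDir.toString());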
Project: hbase    File: TableSnapshotInputFormatImpl.java
public ClientSideRegionScanner getScanner() {
  return scanner;
}
Project: PyroDB    File: TableSnapshotInputFormatImpl.java
public ClientSideRegionScanner getScanner() {
  return scanner;
}