Usage examples of the Java class org.apache.hadoop.hbase.util.HFileTestUtil
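
All of the snippets below exercise the same helper. Inferred from these call sites (an orientation sketch, not the authoritative declaration), HFileTestUtil.createHFile takes a configuration, a filesystem, a target path, a column family and qualifier, a first and last row key, and the number of rows to write; util in the sketch is assumed to be the tests' HBaseTestingUtility field.

// Orientation sketch only: argument order inferred from the examples on this page,
// and util is assumed to be an HBaseTestingUtility instance as in the tests below.
Configuration conf = util.getConfiguration();
FileSystem fs = util.getTestFileSystem();
Path familyDir = new Path(util.getDataTestDirOnTestFS("example"), "cf");
HFileTestUtil.createHFile(conf, fs, new Path(familyDir, "hfile_0"),
    Bytes.toBytes("cf"), Bytes.toBytes("q"),      // family and qualifier
    Bytes.toBytes("begin"), Bytes.toBytes("end"), // first and last row key
    1000);                                        // number of rows to write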

Project: ditb    File: TestLoadIncrementalHFiles.java
@Test(timeout = 60000)
public void testLoadTooMayHFiles() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testLoadTooMayHFiles");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  for (int i = 0; i <= MAX_FILES_PER_REGION_PER_FAMILY; i++) {
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + i), FAMILY, QUALIFIER, from, to, 1000);
  }

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String [] args= {dir.toString(), "mytable_testLoadTooMayHFiles"};
  try {
    loader.run(args);
    fail("Bulk loading too many files should fail");
  } catch (IOException ie) {
    assertTrue(ie.getMessage().contains("Trying to load more than "
      + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
  }
}
Project: ditb    File: TestLoadIncrementalHFiles.java
/**
 * Write a random data file and a non-file in a dir with a valid family name
 * that is not part of the table families. We should be able to bulk load
 * without getting the unmatched family exception. HBASE-13037/HBASE-13227
 */
private void testNonHfileFolder(String tableName, boolean preCreateTable) throws Exception {
  Path dir = util.getDataTestDirOnTestFS(tableName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);

  Path familyDir = new Path(dir, Bytes.toString(FAMILY));
  HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_0"),
      FAMILY, QUALIFIER, Bytes.toBytes("begin"), Bytes.toBytes("end"), 500);
  createRandomDataFile(fs, new Path(familyDir, "012356789"), 16 * 1024);

  final String NON_FAMILY_FOLDER = "_logs";
  Path nonFamilyDir = new Path(dir, NON_FAMILY_FOLDER);
  fs.mkdirs(nonFamilyDir);
  fs.mkdirs(new Path(nonFamilyDir, "non-file"));
  createRandomDataFile(fs, new Path(nonFamilyDir, "012356789"), 16 * 1024);

  Table table = null;
  try {
    if (preCreateTable) {
      table = util.createTable(TableName.valueOf(tableName), FAMILY);
    } else {
      table = util.getConnection().getTable(TableName.valueOf(tableName));
    }

    final String[] args = {dir.toString(), tableName};
    new LoadIncrementalHFiles(util.getConfiguration()).run(args);
    assertEquals(500, util.countRows(table));
  } finally {
    if (table != null) {
      table.close();
    }
    fs.delete(dir, true);
  }
}
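
The createRandomDataFile helper called above is a private method of the test class and is not reproduced on this page. A minimal sketch of what such a helper could look like, assuming it only needs to fill the file with the requested number of arbitrary bytes (an illustration, not the actual HBase test code):

// Hypothetical sketch of a createRandomDataFile helper: writes size bytes of
// pseudo-random data to the given path. Not the actual HBase implementation.
private static void createRandomDataFile(FileSystem fs, Path path, int size)
    throws IOException {
  FSDataOutputStream out = fs.create(path);
  try {
    byte[] chunk = new byte[1024];
    new Random().nextBytes(chunk);
    int written = 0;
    while (written < size) {
      int len = Math.min(chunk.length, size - written);
      out.write(chunk, 0, len);
      written += len;
    }
  } finally {
    out.close();
  }
}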
Project: ditb    File: TestLoadIncrementalHFiles.java
@Test(timeout = 120000)
public void testSplitStoreFile() throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  HFileTestUtil.createHFile(util.getConfiguration(), fs, testIn, FAMILY, QUALIFIER,
      Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
Project: ditb    File: TestLoadIncrementalHFiles.java
private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadEncoding,
    DataBlockEncoding cfEncoding) throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFileWithDifferentEncoding");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setDataBlockEncoding(cfEncoding);
  HFileTestUtil.createHFileWithDataBlockEncoding(
      util.getConfiguration(), fs, testIn, bulkloadEncoding,
      FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
Project: ditb    File: TestLoadIncrementalHFiles.java
@Test(timeout = 120000)
public void testTableWithCFNameStartWithUnderScore() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("cfNameStartWithUnderScore");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  String family = "_cf";
  Path familyDir = new Path(dir, family);

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  Configuration conf = util.getConfiguration();
  String tableName = "mytable_cfNameStartWithUnderScore";
  Table table = util.createTable(TableName.valueOf(tableName), family);
  HFileTestUtil.createHFile(conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family),
    QUALIFIER, from, to, 1000);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  String[] args = { dir.toString(), tableName };
  try {
    loader.run(args);
    assertEquals(1000, util.countRows(table));
  } finally {
    if (null != table) {
      table.close();
    }
  }
}
Project: pbase    File: TestLoadIncrementalHFiles.java
@Test
public void testSplitStoreFile() throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  HFileTestUtil.createHFile(util.getConfiguration(), fs, testIn, FAMILY, QUALIFIER,
      Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
Project: pbase    File: TestLoadIncrementalHFiles.java
@Test
public void testLoadTooMayHFiles() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testLoadTooMayHFiles");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  for (int i = 0; i <= MAX_FILES_PER_REGION_PER_FAMILY; i++) {
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + i), FAMILY, QUALIFIER, from, to, 1000);
  }

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String [] args= {dir.toString(), "mytable_testLoadTooMayHFiles"};
  try {
    loader.run(args);
    fail("Bulk loading too many files should fail");
  } catch (IOException ie) {
    assertTrue(ie.getMessage().contains("Trying to load more than "
      + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
  }
}
Project: HIndex    File: TestLoadIncrementalHFiles.java
@Test
public void testSplitStoreFile() throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  HFileTestUtil.createHFile(util.getConfiguration(), fs, testIn, FAMILY, QUALIFIER,
      Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
Project: HIndex    File: TestLoadIncrementalHFiles.java
@Test
public void testLoadTooMayHFiles() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testLoadTooMayHFiles");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  for (int i = 0; i <= MAX_FILES_PER_REGION_PER_FAMILY; i++) {
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + i), FAMILY, QUALIFIER, from, to, 1000);
  }

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String [] args= {dir.toString(), "mytable_testLoadTooMayHFiles"};
  try {
    loader.run(args);
    fail("Bulk loading too many files should fail");
  } catch (IOException ie) {
    assertTrue(ie.getMessage().contains("Trying to load more than "
      + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
  }
}
Project: hbase    File: TestReplicationSyncUpToolWithBulkLoadedData.java
private void loadAndValidateHFileReplication(String testName, byte[] row, byte[] fam,
    Table source, byte[][][] hfileRanges, int numOfRows) throws Exception {
  Path dir = utility1.getDataTestDirOnTestFS(testName);
  FileSystem fs = utility1.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(fam));

  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(utility1.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + hfileIdx++), fam, row, from, to, numOfRows);
  }

  final TableName tableName = source.getName();
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(utility1.getConfiguration());
  String[] args = { dir.toString(), tableName.toString() };
  loader.run(args);
}
Project: hbase    File: TestLoadIncrementalHFiles.java
@Test(timeout = 120000)
public void testSplitStoreFile() throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  ColumnFamilyDescriptor familyDesc = ColumnFamilyDescriptorBuilder.of(FAMILY);
  HFileTestUtil.createHFile(util.getConfiguration(), fs, testIn, FAMILY, QUALIFIER,
    Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(util.getConfiguration(), testIn, familyDesc,
    Bytes.toBytes("ggg"), bottomOut, topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
Project: hbase    File: TestLoadIncrementalHFiles.java
private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadEncoding,
    DataBlockEncoding cfEncoding) throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFileWithDifferentEncoding");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  ColumnFamilyDescriptor familyDesc =
      ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setDataBlockEncoding(cfEncoding).build();
  HFileTestUtil.createHFileWithDataBlockEncoding(util.getConfiguration(), fs, testIn,
    bulkloadEncoding, FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(util.getConfiguration(), testIn, familyDesc,
    Bytes.toBytes("ggg"), bottomOut, topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
Project: hbase    File: TestLoadIncrementalHFiles.java
@Test(timeout = 60000)
public void testLoadTooMayHFiles() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testLoadTooMayHFiles");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  for (int i = 0; i <= MAX_FILES_PER_REGION_PER_FAMILY; i++) {
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_" + i),
      FAMILY, QUALIFIER, from, to, 1000);
  }

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String[] args = { dir.toString(), "mytable_testLoadTooMayHFiles" };
  try {
    loader.run(args);
    fail("Bulk loading too many files should fail");
  } catch (IOException ie) {
    assertTrue(ie.getMessage()
        .contains("Trying to load more than " + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
  }
}
Project: hbase    File: TestRefreshHFilesEndpoint.java
@Test
public void testRefreshRegionHFilesEndpoint() throws Exception {
  setUp(HRegion.class.getName());
  MasterFileSystem mfs = HTU.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), TABLE_NAME);
  for (Region region : cluster.getRegions(TABLE_NAME)) {
    Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
    Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
    HFileTestUtil
      .createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(), new Path(familyDir, HFILE_NAME), FAMILY,
                   QUALIFIER, Bytes.toBytes("50"), Bytes.toBytes("60"), NUM_ROWS);
  }
  assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
  callRefreshRegionHFilesEndPoint();
  assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
}
Project: PyroDB    File: TestLoadIncrementalHFiles.java
@Test
public void testSplitStoreFile() throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  HFileTestUtil.createHFile(util.getConfiguration(), fs, testIn, FAMILY, QUALIFIER,
      Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
Project: PyroDB    File: TestLoadIncrementalHFiles.java
@Test
public void testLoadTooMayHFiles() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testLoadTooMayHFiles");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  for (int i = 0; i <= MAX_FILES_PER_REGION_PER_FAMILY; i++) {
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + i), FAMILY, QUALIFIER, from, to, 1000);
  }

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String [] args= {dir.toString(), "mytable_testLoadTooMayHFiles"};
  try {
    loader.run(args);
    fail("Bulk loading too many files should fail");
  } catch (IOException ie) {
    assertTrue(ie.getMessage().contains("Trying to load more than "
      + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles"));
  }
}
Project: ditb    File: TestLoadIncrementalHFiles.java
private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {

  for (boolean managed : new boolean[] { true, false }) {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
          + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    if (!util.getHBaseAdmin().tableExists(tableName)) {
      util.getHBaseAdmin().createTable(htd);
    }
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());

    if (managed) {
      try (HTable table = new HTable(util.getConfiguration(), tableName)) {
        loader.doBulkLoad(dir, table);
        assertEquals(expectedRows, util.countRows(table));
      }
    } else {
      try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
          HTable table = (HTable) conn.getTable(tableName)) {
        loader.doBulkLoad(dir, table);
      }
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
            file.getPath().getName() != "DONOTERASE");
      }
    }

    util.deleteTable(tableName);
  }
}
Project: ditb    File: TestLoadIncrementalHFiles.java
/**
 * Test that tags survive through a bulk load that needs to split hfiles.
 *
 * This test depends on "hbase.client.rpc.codec" being set to KeyValueCodecWithTags so that
 * the client can get tags in the responses.
 */
@Test(timeout = 60000)
public void htestTagsSurviveBulkLoadSplit() throws Exception {
  Path dir = util.getDataTestDirOnTestFS(tn.getMethodName());
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));
  // table has these split points
  byte [][] tableSplitKeys = new byte[][] {
          Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"),
          Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"),
  };

  // creating an hfile that has values that span the split points.
  byte[] from = Bytes.toBytes("ddd");
  byte[] to = Bytes.toBytes("ooo");
  HFileTestUtil.createHFileWithTags(util.getConfiguration(), fs,
      new Path(familyDir, tn.getMethodName()+"_hfile"),
      FAMILY, QUALIFIER, from, to, 1000);
  int expectedRows = 1000;

  TableName tableName = TableName.valueOf(tn.getMethodName());
  HTableDescriptor htd = buildHTD(tableName, BloomType.NONE);
  util.getHBaseAdmin().createTable(htd, tableSplitKeys);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String [] args= {dir.toString(), tableName.toString()};
  loader.run(args);

  Table table = util.getConnection().getTable(tableName);
  try {
    assertEquals(expectedRows, util.countRows(table));
    HFileTestUtil.verifyTags(table);
  } finally {
    table.close();
  }

  util.deleteTable(tableName);
}
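
As the javadoc above points out, the tag assertions only hold when the client RPC codec carries tags. A hedged sketch of how a test might set that property before starting the mini cluster; the codec class name is an assumption and should be checked against the HBase version in use:

// Sketch: let the client RPC codec carry tags so HFileTestUtil.verifyTags() can see them.
// The codec class name below is assumed; verify it for your HBase release.
Configuration conf = util.getConfiguration();
conf.set("hbase.client.rpc.codec",
    "org.apache.hadoop.hbase.codec.KeyValueCodecWithTags");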
Project: pbase    File: TestLoadIncrementalHFiles.java
/**
 * Write a random data file in a dir with a valid family name that is not part of the table
 * families. We should be able to bulk load without getting the unmatched family exception.
 * HBASE-13037
 */
@Test(timeout = 60000)
public void testNonHfileFolderWithUnmatchedFamilyName() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("testNonHfileFolderWithUnmatchedFamilyName");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);

  Path familyDir = new Path(dir, Bytes.toString(FAMILY));
  HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_0"),
      FAMILY, QUALIFIER, Bytes.toBytes("begin"), Bytes.toBytes("end"), 500);

  final String NON_FAMILY_FOLDER = "_logs";
  Path nonFamilyDir = new Path(dir, NON_FAMILY_FOLDER);
  fs.mkdirs(nonFamilyDir);
  createRandomDataFile(fs, new Path(nonFamilyDir, "012356789"), 16 * 1024);

  Table table = null;
  try {
    final String TABLE_NAME = "mytable_testNonHfileFolderWithUnmatchedFamilyName";
    table = util.createTable(TableName.valueOf(TABLE_NAME), FAMILY);

    final String[] args = {dir.toString(), TABLE_NAME};
    new LoadIncrementalHFiles(util.getConfiguration()).run(args);
    assertEquals(500, util.countRows(table));
  } finally {
    if (table != null) {
      table.close();
    }
    fs.delete(dir, true);
  }
}
Project: pbase    File: TestLoadIncrementalHFiles.java
@Test
public void testTableWithCFNameStartWithUnderScore() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("cfNameStartWithUnderScore");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  String family = "_cf";
  Path familyDir = new Path(dir, family);

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  Configuration conf = util.getConfiguration();
  String tableName = "mytable_cfNameStartWithUnderScore";
  Table table = util.createTable(TableName.valueOf(tableName), family);
  HFileTestUtil.createHFile(conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family),
    QUALIFIER, from, to, 1000);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  String[] args = { dir.toString(), tableName };
  try {
    loader.run(args);
    assertEquals(1000, util.countRows(table));
  } finally {
    if (null != table) {
      table.close();
    }
  }
}
Project: HIndex    File: TestLoadIncrementalHFiles.java
private void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges)
    throws Exception {
  Path dir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
  }
  int expectedRows = hfileIdx * 1000;

  final byte[] TABLE = Bytes.toBytes("mytable_"+testName);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setBloomFilterType(bloomType);
  htd.addFamily(familyDesc);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String [] args= {dir.toString(),"mytable_"+testName};
  loader.run(args);
  HTable table = new HTable(util.getConfiguration(), TABLE);

  assertEquals(expectedRows, util.countRows(table));
}
Project: hbase    File: TestMasterReplication.java
private void loadAndValidateHFileReplication(String testName, int masterNumber,
    int[] slaveNumbers, byte[] row, byte[] fam, Table[] tables, byte[][][] hfileRanges,
    int numOfRows, int[] expectedCounts, boolean toValidate) throws Exception {
  HBaseTestingUtility util = utilities[masterNumber];

  Path dir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path familyDir = new Path(dir, Bytes.toString(fam));

  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(util.getConfiguration(), fs,
      new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows);
  }

  Table source = tables[masterNumber];
  final TableName tableName = source.getName();
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String[] args = { dir.toString(), tableName.toString() };
  loader.run(args);

  if (toValidate) {
    for (int slaveClusterNumber : slaveNumbers) {
      wait(slaveClusterNumber, tables[slaveClusterNumber], expectedCounts[slaveClusterNumber]);
    }
  }
}
Project: hbase    File: TestLoadIncrementalHFiles.java
/**
 * Test that tags survive through a bulk load that needs to split hfiles. This test depends on
 * "hbase.client.rpc.codec" being set to KeyValueCodecWithTags so that the client can get tags
 * in the responses.
 */
@Test(timeout = 60000)
public void testTagsSurviveBulkLoadSplit() throws Exception {
  Path dir = util.getDataTestDirOnTestFS(tn.getMethodName());
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));
  // table has these split points
  byte[][] tableSplitKeys = new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"),
      Bytes.toBytes("jjj"), Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), };

  // creating an hfile that has values that span the split points.
  byte[] from = Bytes.toBytes("ddd");
  byte[] to = Bytes.toBytes("ooo");
  HFileTestUtil.createHFileWithTags(util.getConfiguration(), fs,
    new Path(familyDir, tn.getMethodName() + "_hfile"), FAMILY, QUALIFIER, from, to, 1000);
  int expectedRows = 1000;

  TableName tableName = TableName.valueOf(tn.getMethodName());
  TableDescriptor htd = buildHTD(tableName, BloomType.NONE);
  util.getAdmin().createTable(htd, tableSplitKeys);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String[] args = { dir.toString(), tableName.toString() };
  loader.run(args);

  Table table = util.getConnection().getTable(tableName);
  try {
    assertEquals(expectedRows, util.countRows(table));
    HFileTestUtil.verifyTags(table);
  } finally {
    table.close();
  }

  util.deleteTable(tableName);
}
Project: hbase    File: TestLoadIncrementalHFiles.java
/**
 * Write a random data file and a non-file in a dir with a valid family name that is not part
 * of the table families. We should be able to bulk load without getting the unmatched family
 * exception. HBASE-13037/HBASE-13227
 */
private void testNonHfileFolder(String tableName, boolean preCreateTable) throws Exception {
  Path dir = util.getDataTestDirOnTestFS(tableName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());

  Path familyDir = new Path(dir, Bytes.toString(FAMILY));
  HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_0"), FAMILY,
    QUALIFIER, Bytes.toBytes("begin"), Bytes.toBytes("end"), 500);
  createRandomDataFile(fs, new Path(familyDir, "012356789"), 16 * 1024);

  final String NON_FAMILY_FOLDER = "_logs";
  Path nonFamilyDir = new Path(dir, NON_FAMILY_FOLDER);
  fs.mkdirs(nonFamilyDir);
  fs.mkdirs(new Path(nonFamilyDir, "non-file"));
  createRandomDataFile(fs, new Path(nonFamilyDir, "012356789"), 16 * 1024);

  Table table = null;
  try {
    if (preCreateTable) {
      table = util.createTable(TableName.valueOf(tableName), FAMILY);
    } else {
      table = util.getConnection().getTable(TableName.valueOf(tableName));
    }

    final String[] args = { dir.toString(), tableName };
    new LoadIncrementalHFiles(util.getConfiguration()).run(args);
    assertEquals(500, util.countRows(table));
  } finally {
    if (table != null) {
      table.close();
    }
    fs.delete(dir, true);
  }
}
Project: hbase    File: TestLoadIncrementalHFiles.java
@Test(timeout = 120000)
public void testTableWithCFNameStartWithUnderScore() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("cfNameStartWithUnderScore");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  String family = "_cf";
  Path familyDir = new Path(dir, family);

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  Configuration conf = util.getConfiguration();
  String tableName = tn.getMethodName();
  Table table = util.createTable(TableName.valueOf(tableName), family);
  HFileTestUtil.createHFile(conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family),
    QUALIFIER, from, to, 1000);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  String[] args = { dir.toString(), tableName };
  try {
    loader.run(args);
    assertEquals(1000, util.countRows(table));
  } finally {
    if (null != table) {
      table.close();
    }
  }
}
Project: hbase    File: IntegrationTestIngestStripeCompactions.java
@Override
protected void initTable() throws IOException {
  // Do the same as the LoadTestTool does, but with different table configuration.
  HTableDescriptor htd = new HTableDescriptor(getTablename());
  htd.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName());
  htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "100");
  HColumnDescriptor hcd = new HColumnDescriptor(HFileTestUtil.DEFAULT_COLUMN_FAMILY);
  HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), htd, hcd);
}
Project: PyroDB    File: TestLoadIncrementalHFiles.java
private void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges)
    throws Exception {
  Path dir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
  }
  int expectedRows = hfileIdx * 1000;

  final byte[] TABLE = Bytes.toBytes("mytable_"+testName);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setBloomFilterType(bloomType);
  htd.addFamily(familyDesc);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String [] args= {dir.toString(),"mytable_"+testName};
  loader.run(args);
  HTable table = new HTable(util.getConfiguration(), TABLE);

  assertEquals(expectedRows, util.countRows(table));
}
Project: pbase    File: TestLoadIncrementalHFiles.java
private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {
  Path dir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
  }
  int expectedRows = hfileIdx * 1000;

  if (preCreateTable) {
    util.getHBaseAdmin().createTable(htd, tableSplitKeys);
  }

  final TableName tableName = htd.getTableName();
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String [] args= {dir.toString(), tableName.toString()};
  loader.run(args);

  Table table = new HTable(util.getConfiguration(), tableName);
  try {
    assertEquals(expectedRows, util.countRows(table));
  } finally {
    table.close();
  }

  // verify staging folder has been cleaned up
  Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
  if(fs.exists(stagingBasePath)) {
    FileStatus[] files = fs.listStatus(stagingBasePath);
    for(FileStatus file : files) {
      assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
        file.getPath().getName() != "DONOTERASE");
    }
  }

  util.deleteTable(tableName);
}
Project: HIndex    File: TestLoadIncrementalHFiles.java
/**
 * Test loading into a column family that does not exist.
 */
@Test
public void testNonexistentColumnFamilyLoad() throws Exception {
  String testName = "testNonexistentColumnFamilyLoad";
  byte[][][] hFileRanges = new byte[][][] {
    new byte[][]{ Bytes.toBytes("aaa"), Bytes.toBytes("ccc") },
    new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
  }; 

  Path dir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  int hFileIdx = 0;
  for (byte[][] range : hFileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + hFileIdx++), FAMILY, QUALIFIER, from, to, 1000);
  }

  final byte[] TABLE = Bytes.toBytes("mytable_"+testName);

  HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
  // set the real family name to upper case on purpose to simulate the case where
  // the family name in the HFiles is invalid
  HColumnDescriptor family =
      new HColumnDescriptor(Bytes.toBytes(new String(FAMILY).toUpperCase()));
  htd.addFamily(family);
  admin.createTable(htd, SPLIT_KEYS);

  HTable table = new HTable(util.getConfiguration(), TABLE);
  util.waitTableEnabled(TABLE);
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  try {
    loader.doBulkLoad(dir, table);
    assertTrue("Loading into table with non-existent family should have failed", false);
  } catch (Exception e) {
    assertTrue("IOException expected", e instanceof IOException);
    // further check whether the exception message is correct
    String errMsg = e.getMessage();
    assertTrue("Incorrect exception message, expected message: ["
        + EXPECTED_MSG_FOR_NON_EXISTING_FAMILY + "], current message: [" + errMsg + "]",
        errMsg.contains(EXPECTED_MSG_FOR_NON_EXISTING_FAMILY));
  }
  table.close();
  admin.close();
}
Project: hbase    File: TestReplicationSink.java
/**
 * Test replicateEntries with a bulk load entry for 25 HFiles
 */
@Test
public void testReplicateEntriesForHFiles() throws Exception {
  Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
  Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
  int numRows = 10;

  List<Path> p = new ArrayList<>(1);

  // 1. Generate 25 hfile ranges
  Random rng = new SecureRandom();
  Set<Integer> numbers = new HashSet<>();
  while (numbers.size() < 50) {
    numbers.add(rng.nextInt(1000));
  }
  List<Integer> numberList = new ArrayList<>(numbers);
  Collections.sort(numberList);
  Map<String, Long> storeFilesSize = new HashMap<>(1);

  // 2. Create 25 hfiles
  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = dir.getFileSystem(conf);
  Iterator<Integer> numbersItr = numberList.iterator();
  for (int i = 0; i < 25; i++) {
    Path hfilePath = new Path(familyDir, "hfile_" + i);
    HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1,
      Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
    p.add(hfilePath);
    storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
  }

  // 3. Create a BulkLoadDescriptor and a WALEdit
  Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
  storeFiles.put(FAM_NAME1, p);
  org.apache.hadoop.hbase.wal.WALEdit edit = null;
  WALProtos.BulkLoadDescriptor loadDescriptor = null;

  try (Connection c = ConnectionFactory.createConnection(conf);
      RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
    HRegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegionInfo();
    loadDescriptor =
        ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,
            UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()),
            storeFiles, storeFilesSize, 1);
    edit = org.apache.hadoop.hbase.wal.WALEdit.createBulkLoadEvent(regionInfo,
      loadDescriptor);
  }
  List<WALEntry> entries = new ArrayList<>(1);

  // 4. Create a WALEntryBuilder
  WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);

  // 5. Copy the hfile to the path as it is in reality
  for (int i = 0; i < 25; i++) {
    String pathToHfileFromNS =
        new StringBuilder(100).append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR)
            .append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR)
            .append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray()))
            .append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR)
            .append("hfile_" + i).toString();
    String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;

    FileUtil.copy(fs, p.get(0), fs, new Path(dst), false, conf);
  }

  entries.add(builder.build());
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 6. Assert no existing data in table
    assertEquals(0, scanner.next(numRows).length);
  }
  // 7. Replicate the bulk loaded entry
  SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()),
    replicationClusterId, baseNamespaceDir, hfileArchiveDir);
  try (ResultScanner scanner = table1.getScanner(new Scan())) {
    // 8. Assert data is replicated
    assertEquals(numRows, scanner.next(numRows).length);
  }
}
Project: PyroDB    File: TestLoadIncrementalHFiles.java
/**
 * Test loading into a column family that does not exist.
 */
@Test
public void testNonexistentColumnFamilyLoad() throws Exception {
  String testName = "testNonexistentColumnFamilyLoad";
  byte[][][] hFileRanges = new byte[][][] {
    new byte[][]{ Bytes.toBytes("aaa"), Bytes.toBytes("ccc") },
    new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
  }; 

  Path dir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  int hFileIdx = 0;
  for (byte[][] range : hFileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + hFileIdx++), FAMILY, QUALIFIER, from, to, 1000);
  }

  final byte[] TABLE = Bytes.toBytes("mytable_"+testName);

  HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
  // set the real family name to upper case on purpose to simulate the case where
  // the family name in the HFiles is invalid
  HColumnDescriptor family =
      new HColumnDescriptor(Bytes.toBytes(new String(FAMILY).toUpperCase()));
  htd.addFamily(family);
  admin.createTable(htd, SPLIT_KEYS);

  HTable table = new HTable(util.getConfiguration(), TABLE);
  util.waitTableEnabled(TABLE);
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  try {
    loader.doBulkLoad(dir, table);
    assertTrue("Loading into table with non-existent family should have failed", false);
  } catch (Exception e) {
    assertTrue("IOException expected", e instanceof IOException);
    // further check whether the exception message is correct
    String errMsg = e.getMessage();
    assertTrue("Incorrect exception message, expected message: ["
        + EXPECTED_MSG_FOR_NON_EXISTING_FAMILY + "], current message: [" + errMsg + "]",
        errMsg.contains(EXPECTED_MSG_FOR_NON_EXISTING_FAMILY));
  }
  table.close();
  admin.close();
}