private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  Path hlogPath = new Path(FSUtils.getRootDir(conf) + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS =
    HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}

/**
 * @param b Family name.
 * @return <code>b</code>
 * @throws IllegalArgumentException If <code>b</code> is not null and not a legitimate family
 * name: it must be printable, must not contain a ':', and cannot start with a '.'
 * (null passes are allowed because <code>b</code> can be null when deserializing).
 * A family also cannot be an empty value or equal "recovered.edits".
 */
public static byte[] isLegalFamilyName(final byte[] b) {
  if (b == null) {
    return b;
  }
  if (b[0] == '.') {
    throw new IllegalArgumentException("Family names cannot start with a " +
      "period: " + Bytes.toString(b));
  }
  for (int i = 0; i < b.length; i++) {
    if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
      throw new IllegalArgumentException("Illegal character <" + b[i] +
        ">. Family names cannot contain control characters or colons: " + Bytes.toString(b));
    }
  }
  byte[] recoveredEdit = Bytes.toBytes(HLog.RECOVERED_EDITS_DIR);
  if (Bytes.equals(recoveredEdit, b)) {
    throw new IllegalArgumentException("Family name cannot be: " + HLog.RECOVERED_EDITS_DIR);
  }
  return b;
}

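A minimal usage sketch of the check above (not from the original source; the enclosing class is
not shown here, so the unqualified static call is an assumption):

// Hedged illustration only: a legal name is returned unchanged, an illegal one throws.
byte[] ok = isLegalFamilyName(Bytes.toBytes("info"));   // returns the input as-is
try {
  isLegalFamilyName(Bytes.toBytes(".meta"));            // starts with a period -> rejected
} catch (IllegalArgumentException expected) {
  // thrown as documented above; the same happens for ':', '\\', '/', control characters,
  // and the "recovered.edits" directory name
}
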
private List<Path> getLogDirs(final List<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  for (ServerName serverName : serverNames) {
    Path logDir = new Path(this.rootdir, HLog.getHLogDirectoryName(serverName.toString()));
    Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
    // Rename the directory so a rogue RS doesn't create more HLogs
    if (fs.exists(logDir)) {
      if (!HBaseFileSystem.renameDirForFileSystem(fs, logDir, splitDir)) {
        throw new IOException("Failed fs.rename for log split: " + logDir);
      }
      logDir = splitDir;
      LOG.debug("Renamed region directory: " + splitDir);
    } else if (!fs.exists(splitDir)) {
      LOG.info("Log dir for server " + serverName + " does not exist");
      continue;
    }
    logDirs.add(splitDir);
  }
  return logDirs;
}

private void verifyRecoverEdits(final Path tableDir, final TableName tableName,
    final Map<byte[], byte[]> regionsMap) throws IOException {
  for (FileStatus regionStatus : FSUtils.listStatus(fs, tableDir)) {
    assertTrue(regionStatus.getPath().getName().startsWith(tableName.getNameAsString()));
    Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionStatus.getPath());
    byte[] regionName = Bytes.toBytes(regionStatus.getPath().getName());
    assertFalse(regionsMap.containsKey(regionName));
    for (FileStatus logStatus : FSUtils.listStatus(fs, regionEdits)) {
      HLog.Reader reader = HLogFactory.createReader(fs, logStatus.getPath(), conf);
      try {
        HLog.Entry entry;
        while ((entry = reader.next()) != null) {
          HLogKey key = entry.getKey();
          assertEquals(tableName, key.getTablename());
          assertArrayEquals(regionName, key.getEncodedRegionName());
        }
      } finally {
        reader.close();
      }
    }
  }
}

/**
 * Setup WAL log and replication if enabled. Replication setup is done in here because it wants
 * to be hooked up to WAL.
 * @return A WAL instance.
 * @throws IOException
 */
private HLog setupWALAndReplication() throws IOException {
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  Path logdir = new Path(rootDir,
    HLog.getHLogDirectoryName(this.serverNameFromMasterPOV.toString()));
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  if (this.fs.exists(logdir)) {
    throw new RegionServerRunningException("Region server has already " +
      "created directory at " + this.serverNameFromMasterPOV.toString());
  }
  // Instantiate replication manager if replication enabled. Pass it the
  // log directories.
  createNewReplicationInstance(conf, this, this.fs, logdir, oldLogDir);
  return instantiateHLog(logdir, oldLogDir);
}

/**
 * Tests that the LogRoller performs the roll even if there are no edits
 */
@Test
public void testNoEdits() throws Exception {
  final String tableName = "TestLogRollPeriodNoEdits";
  TEST_UTIL.createTable(tableName, "cf");
  try {
    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    try {
      HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
      HLog log = server.getWAL();
      checkMinLogRolls(log, 5);
    } finally {
      table.close();
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}

/**
 * Insert then do different types of deletes
 * @throws Exception
 */
@Test
public void testMixedDeletes() throws Exception {
  HLog.Entry[] entries = new HLog.Entry[3];
  for (int i = 0; i < 3; i++) {
    entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
  }
  SINK.replicateEntries(entries);
  entries = new HLog.Entry[3];
  entries[0] = createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn);
  entries[1] = createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily);
  entries[2] = createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn);
  SINK.replicateEntries(entries);
  Scan scan = new Scan();
  ResultScanner scanRes = table1.getScanner(scan);
  assertEquals(0, scanRes.next(3).length);
}

/**
 * Puts are buffered, but this tests when a delete (not-buffered) is applied
 * before the actual Put that creates it.
 * @throws Exception
 */
@Test
public void testApplyDeleteBeforePut() throws Exception {
  HLog.Entry[] entries = new HLog.Entry[5];
  for (int i = 0; i < 2; i++) {
    entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
  }
  entries[2] = createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily);
  for (int i = 3; i < 5; i++) {
    entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
  }
  SINK.replicateEntries(entries);
  Get get = new Get(Bytes.toBytes(1));
  Result res = table1.get(get);
  assertEquals(0, res.size());
}

private HRegion mergeAndVerify(final String msg, final String regionName1,
    final String regionName2, final HLog log, final int upperbound) throws Exception {
  Merge merger = new Merge(this.conf);
  LOG.info(msg);
  LOG.info("fs2=" + this.conf.get("fs.defaultFS"));
  int errCode = ToolRunner.run(this.conf, merger,
    new String[] {this.desc.getTableName().getNameAsString(), regionName1, regionName2});
  assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0);
  HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
  // Now verify that we can read all the rows from regions 0, 1
  // in the new merged region.
  HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
  verifyMerge(merged, upperbound);
  merged.close();
  LOG.info("Verified " + msg);
  return merged;
}

@Override
public boolean removeFromOnlineRegions(final HRegion r, ServerName destination) {
  HRegion toReturn = this.onlineRegions.remove(r.getRegionInfo().getEncodedName());
  if (destination != null) {
    HLog wal = getWAL();
    long closeSeqNum = wal.getEarliestMemstoreSeqNum(r.getRegionInfo().getEncodedNameAsBytes());
    if (closeSeqNum == HConstants.NO_SEQNUM) {
      // No edits in WAL for this region; get the sequence number when the region was opened.
      closeSeqNum = r.getOpenSeqNum();
      if (closeSeqNum == HConstants.NO_SEQNUM) {
        closeSeqNum = 0;
      }
    }
    addToMovedRegions(r.getRegionInfo().getEncodedName(), destination, closeSeqNum);
  }
  this.regionFavoredNodesMap.remove(r.getRegionInfo().getEncodedName());
  return toReturn != null;
}

/**
 * Convenience method creating new HRegions. Used by createTable.
 * The {@link HLog} for the created region needs to be closed
 * explicitly, if it is not null.
 * Use {@link HRegion#getLog()} to get access.
 *
 * @param info Info for region to create.
 * @param rootDir Root directory for HBase instance
 * @param tableDir table directory
 * @param conf
 * @param hTableDescriptor
 * @param hlog shared HLog
 * @param initialize - true to initialize the region
 * @param ignoreHLog - true to skip generating a new hlog if it is null, mostly for createTable
 * @return new HRegion
 * @throws IOException
 */
public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
    final Path tableDir, final Configuration conf, final HTableDescriptor hTableDescriptor,
    final HLog hlog, final boolean initialize, final boolean ignoreHLog) throws IOException {
  LOG.info("creating HRegion " + info.getTable().getNameAsString() + " HTD == " +
    hTableDescriptor + " RootDir = " + rootDir +
    " Table name == " + info.getTable().getNameAsString());
  FileSystem fs = FileSystem.get(conf);
  HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info);
  HLog effectiveHLog = hlog;
  if (hlog == null && !ignoreHLog) {
    effectiveHLog = HLogFactory.createHLog(fs, rfs.getRegionDir(),
      HConstants.HREGION_LOGDIR_NAME, conf);
  }
  HRegion region = HRegion.newHRegion(tableDir, effectiveHLog, fs, conf, info,
    hTableDescriptor, null);
  if (initialize) {
    // If initializing, set the sequenceId. It is also required by HLogPerformanceEvaluation
    // when verifying the WALEdits.
    region.setSequenceId(region.initialize());
  }
  return region;
}

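A hedged sketch of the calling pattern the Javadoc above describes: pass a null HLog so the
method creates one, then close both the region and its log. The table name, family, and paths
below are placeholder assumptions, not from the original; only calls already shown elsewhere in
this section are used.

// Illustration only, assuming a standalone setup.
Configuration conf = HBaseConfiguration.create();
Path rootDir = FSUtils.getRootDir(conf);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demoTable"));  // placeholder table
htd.addFamily(new HColumnDescriptor("cf"));                                   // placeholder family
HRegionInfo hri = new HRegionInfo(htd.getTableName());
Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
// hlog == null and ignoreHLog == false, so the method creates a log; initialize == true.
HRegion region = HRegion.createHRegion(hri, rootDir, tableDir, conf, htd, null, true, false);
try {
  // ... use the region ...
} finally {
  HLog hlog = region.getLog();   // per the Javadoc, this log must be closed explicitly
  region.close();
  if (hlog != null) {
    hlog.closeAndDelete();
  }
}
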
private void writeTestLog(final Path logFile) throws IOException {
  fs.mkdirs(logFile.getParent());
  HLog.Writer writer = HLog.createWriter(fs, logFile, conf);
  try {
    for (int i = 0; i < 7; ++i) {
      byte[] tableName = getTableName(i);
      for (int j = 0; j < 10; ++j) {
        byte[] regionName = getRegionName(tableName, j);
        for (int k = 0; k < 50; ++k) {
          byte[] rowkey = Bytes.toBytes("row-" + k);
          HLogKey key = new HLogKey(regionName, tableName, (long) k,
            System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
          WALEdit edit = new WALEdit();
          edit.add(new KeyValue(rowkey, TEST_FAMILY, TEST_QUALIFIER, rowkey));
          writer.append(new HLog.Entry(key, edit));
        }
      }
    }
  } finally {
    writer.close();
  }
}

/**
 * Check that we don't get an exception if there is no recovered edits directory to copy
 * @throws Exception on failure
 */
@Test
public void testNoEditsDir() throws Exception {
  SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
  ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
  FileSystem fs = UTIL.getTestFileSystem();
  Path root = UTIL.getDataTestDir();
  String regionName = "regionA";
  Path regionDir = new Path(root, regionName);
  Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
  try {
    // Doesn't really matter where the region's snapshot directory is, but this is pretty close
    Path snapshotRegionDir = new Path(workingDir, regionName);
    fs.mkdirs(snapshotRegionDir);
    Path regionEdits = HLog.getRegionDirRecoveredEditsDir(regionDir);
    assertFalse("Edits dir exists already - it shouldn't", fs.exists(regionEdits));
    CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir,
      snapshotRegionDir);
    task.call();
  } finally {
    // Cleanup the working directory
    FSUtils.delete(fs, regionDir, true);
    FSUtils.delete(fs, workingDir, true);
  }
}

private HRegion mergeAndVerify(final String msg, final String regionName1,
    final String regionName2, final HLog log, final int upperbound) throws Exception {
  Merge merger = new Merge(this.conf);
  LOG.info(msg);
  System.out.println("fs2=" + this.conf.get("fs.defaultFS"));
  int errCode = ToolRunner.run(this.conf, merger,
    new String[] {this.desc.getNameAsString(), regionName1, regionName2});
  assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0);
  HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
  // Now verify that we can read all the rows from regions 0, 1
  // in the new merged region.
  HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
  verifyMerge(merged, upperbound);
  merged.close();
  LOG.info("Verified " + msg);
  return merged;
}

/**
 * Setup WAL log and replication if enabled.
 * Replication setup is done in here because it wants to be hooked up to WAL.
 * @return A WAL instance.
 * @throws IOException
 */
private HLog setupWALAndReplication() throws IOException {
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  final String logName = HLogUtil.getHLogDirectoryName(this.serverNameFromMasterPOV.toString());
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  if (this.fs.exists(logdir)) {
    throw new RegionServerRunningException("Region server has already " +
      "created directory at " + this.serverNameFromMasterPOV.toString());
  }
  // Instantiate replication manager if replication enabled. Pass it the
  // log directories.
  createNewReplicationInstance(conf, this, this.fs, logdir, oldLogDir);
  return instantiateHLog(rootDir, logName);
}

private void init(String methodName, Configuration conf, HColumnDescriptor hcd)
    throws IOException {
  // Setting up a Store
  Path basedir = new Path(DIR + methodName);
  Path logdir = new Path(DIR + methodName + "/logs");
  Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
  FileSystem fs = FileSystem.get(conf);
  fs.delete(logdir, true);
  HTableDescriptor htd = new HTableDescriptor(table);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
  HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
  store = new Store(basedir, region, hcd, fs, conf);
}

@Test(timeout = 180000)
public void testIndexPutWithValueGreaterThanLength() throws IOException {
  String DIR = UTIL.getDataTestDir("TestStore").toString();
  Path basedir = new Path(DIR + "TestIndexPut");
  // Path logdir = new Path(DIR + "TestIndexPut" + "/logs");
  FileSystem fs = UTIL.getTestFileSystem();
  Configuration conf = UTIL.getConfiguration();
  HTableDescriptor htd = new HTableDescriptor("TestIndexPut");
  HRegionInfo info = new HRegionInfo(htd.getTableName(), "A".getBytes(), "B".getBytes(), false);
  HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
  IndexSpecification spec = new IndexSpecification("testSpec");
  spec.addIndexColumn(new HColumnDescriptor("cf1"), "ql1", ValueType.String, 10);
  spec.addIndexColumn(new HColumnDescriptor("cf2"), "ql1", ValueType.String, 10);
  // Assert IOException when value length goes beyond the limit.
  byte[] rowKey = "Arow1".getBytes();
  Put p = new Put(rowKey);
  long time = 1234567;
  boolean returnVal = false;
  try {
    p.add("cf1".getBytes(), "ql1".getBytes(), time, "testvalue11".getBytes());
    IndexUtils.prepareIndexPut(p, spec, region);
  } catch (IOException e) {
    returnVal = true;
  }
  Assert.assertTrue(returnVal);
}

@Test(timeout = 180000)
public void testIndexTableValue() throws IOException {
  String DIR = UTIL.getDataTestDir("TestStore").toString();
  Path basedir = new Path(DIR + "TestIndexPut");
  // Path logdir = new Path(DIR + "TestIndexPut" + "/logs");
  FileSystem fs = UTIL.getTestFileSystem();
  Configuration conf = UTIL.getConfiguration();
  HTableDescriptor htd = new HTableDescriptor("TestIndexPut");
  HRegionInfo info = new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
  IndexSpecification spec = new IndexSpecification("index");
  spec.addIndexColumn(new HColumnDescriptor("cf1"), "ql1", ValueType.String, 10);
  spec.addIndexColumn(new HColumnDescriptor("cf2"), "ql1", ValueType.String, 10);
  byte[] rowKey = "Arow1".getBytes();
  Put p1 = new Put(rowKey);
  long time = 1234567;
  p1.add("cf1".getBytes(), "ql1".getBytes(), time, "testcase".getBytes());
  p1.add("cf2".getBytes(), "ql1".getBytes(), time, "value".getBytes());
  Put indexPut1 = IndexUtils.prepareIndexPut(p1, spec, region);
  List<Cell> kvs = indexPut1.get(Constants.IDX_COL_FAMILY, "".getBytes());
  Cell kv = null;
  if (null != kvs) {
    kv = kvs.get(0);
  }
  byte[] val = kv.getValue();
  byte[] startKeyLengthInBytes = new byte[2];
  System.arraycopy(val, 0, startKeyLengthInBytes, 0, startKeyLengthInBytes.length);
  int startkeylen = (int) (Bytes.toShort(startKeyLengthInBytes));
  Assert.assertEquals(3, startkeylen);
  byte[] rowKeyOffset = new byte[2];
  System.arraycopy(val, startKeyLengthInBytes.length, rowKeyOffset, 0, rowKeyOffset.length);
  int rowKeyOffsetInt = Bytes.toShort(rowKeyOffset);
  Assert.assertEquals(42, rowKeyOffsetInt);
}

@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  if (reader == null) return false;
  HLog.Entry temp;
  long i = -1;
  do {
    // Skip older entries
    try {
      temp = reader.next(currentEntry);
      i++;
    } catch (EOFException x) {
      LOG.info("Corrupted entry detected. Ignoring the rest of the file." +
        " (This is normal when a RegionServer crashed.)");
      return false;
    }
  } while (temp != null && temp.getKey().getWriteTime() < startTime);
  if (temp == null) {
    if (i > 0) LOG.info("Skipped " + i + " entries.");
    LOG.info("Reached end of file.");
    return false;
  } else if (i > 0) {
    LOG.info("Skipped " + i + " entries, until ts: " + temp.getKey().getWriteTime() + ".");
  }
  boolean res = temp.getKey().getWriteTime() <= endTime;
  if (!res) {
    LOG.info("Reached ts: " + temp.getKey().getWriteTime() + " ignoring the rest of the file.");
  }
  return res;
}

/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 * @throws Exception
 */
@Test
public void testLogMoving() throws Exception {
  Path logPath = new Path(logDir, "log");
  if (!FS.exists(logDir)) FS.mkdirs(logDir);
  if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
  HLog.Writer writer = HLogFactory.createWALWriter(FS, logPath, conf);
  for (int i = 0; i < 3; i++) {
    byte[] b = Bytes.toBytes(Integer.toString(i));
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    HLogKey key = new HLogKey(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID);
    writer.append(new HLog.Entry(key, edit));
    writer.sync();
  }
  writer.close();
  HLog.Reader reader = HLogFactory.createReader(FS, logPath, conf);
  HLog.Entry entry = reader.next();
  assertNotNull(entry);
  Path oldLogPath = new Path(oldLogDir, "log");
  FS.rename(logPath, oldLogPath);
  entry = reader.next();
  assertNotNull(entry);
  entry = reader.next();
  entry = reader.next();
  assertNull(entry);
}

/**
 * Create a LogWriter for the specified region if not already created.
 */
private LogWriter getOrCreateWriter(final byte[] regionName, long seqId) throws IOException {
  LogWriter writer = regionLogWriters.get(regionName);
  if (writer == null) {
    Path regionDir = HRegion.getRegionDir(tableDir, Bytes.toString(regionName));
    Path dir = HLog.getRegionDirRecoveredEditsDir(regionDir);
    fs.mkdirs(dir);
    writer = new LogWriter(conf, fs, dir, seqId);
    regionLogWriters.put(regionName, writer);
  }
  return writer;
}

/**
 * @param snapshot Snapshot being taken
 * @param monitor error monitor for the snapshot
 * @param fs {@link FileSystem} where the snapshot is being taken
 * @param regionDir directory for the region to examine for edits
 * @param snapshotRegionDir directory for the region in the snapshot
 */
public CopyRecoveredEditsTask(SnapshotDescription snapshot, ForeignExceptionDispatcher monitor,
    FileSystem fs, Path regionDir, Path snapshotRegionDir) {
  super(snapshot, monitor);
  this.fs = fs;
  this.regiondir = regionDir;
  this.outputDir = HLog.getRegionDirRecoveredEditsDir(snapshotRegionDir);
}

/**
 * @return the HLog
 * @throws IOException e
 */
public synchronized HLog getLog() throws IOException {
  if (this.log == null) {
    Path logdir = new Path(this.fs.getHomeDirectory(),
      HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
    Path oldLogDir = new Path(this.fs.getHomeDirectory(), HConstants.HREGION_OLDLOGDIR_NAME);
    this.log = new HLog(this.fs, logdir, oldLogDir, this.conf);
  }
  return this.log;
}

private HRegion merge(final HTableDescriptor htd, HRegionInfo info1, HRegion meta1,
    HRegionInfo info2, HRegion meta2) throws IOException {
  if (info1 == null) {
    throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
      Bytes.toStringBinary(meta1.getRegionName()));
  }
  if (info2 == null) {
    throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
      Bytes.toStringBinary(meta2.getRegionName()));
  }
  HRegion merged = null;
  HLog log = utils.getLog();
  HRegion r1 = HRegion.openHRegion(info1, htd, log, getConf());
  try {
    HRegion r2 = HRegion.openHRegion(info2, htd, log, getConf());
    try {
      merged = HRegion.merge(r1, r2);
    } finally {
      if (!r2.isClosed()) {
        r2.close();
      }
    }
  } finally {
    if (!r1.isClosed()) {
      r1.close();
    }
  }
  // Remove the old regions from meta.
  // HRegion.merge has already deleted their files.
  removeRegionFromMeta(meta1, info1);
  removeRegionFromMeta(meta2, info2);
  this.mergeInfo = merged.getRegionInfo();
  return merged;
}

/**
 * Creates, flushes, and closes a new region.
 */
public static HRegion createHDFSRegionDir(Configuration conf, HRegionInfo hri,
    HTableDescriptor htd) throws IOException {
  // Create HRegion
  Path root = FSUtils.getRootDir(conf);
  HRegion region = HRegion.createHRegion(hri, root, conf, htd);
  HLog hlog = region.getLog();
  // Close the new region to flush to disk. Close log file too.
  region.close();
  hlog.closeAndDelete();
  return region;
}

private synchronized HLog getMetaWAL() throws IOException {
  if (this.hlogForMeta == null) {
    final String logName = HLog.getHLogDirectoryName(this.serverNameFromMasterPOV.toString());
    Path logdir = new Path(rootDir, logName);
    final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
    this.hlogForMeta = new HLog(this.fs.getBackingFs(), logdir, oldLogDir, this.conf,
      getMetaWALActionListeners(), false, this.serverNameFromMasterPOV.toString(), true);
  }
  return this.hlogForMeta;
}

private HLog getMetaWAL() throws IOException {
  if (this.hlogForMeta != null) return this.hlogForMeta;
  final String logName = HLogUtil.getHLogDirectoryName(this.serverNameFromMasterPOV.toString());
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  this.hlogForMeta = HLogFactory.createMetaHLog(this.fs.getBackingFs(), rootDir, logName,
    this.conf, getMetaWALActionListeners(), this.serverNameFromMasterPOV.toString());
  return this.hlogForMeta;
}

/**
 * Facility for dumping and compacting catalog tables. Only does catalog tables since these are
 * the only tables whose schema we know for sure. For usage run:
 *
 * <pre>
 * ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"),
    "hlog" + tableDir.getName() + EnvironmentEdgeManager.currentTimeMillis());
  final Path oldLogDir = new Path(c.get("hbase.tmp.dir"), HConstants.HREGION_OLDLOGDIR_NAME);
  final HLog log = new HLog(fs, logdir, oldLogDir, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}

/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 * @throws Exception
 */
@Test
public void testLogMoving() throws Exception {
  Path logPath = new Path(logDir, "log");
  if (!FS.exists(logDir)) FS.mkdirs(logDir);
  if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
  HLog.Writer writer = HLog.createWriter(FS, logPath, conf);
  for (int i = 0; i < 3; i++) {
    byte[] b = Bytes.toBytes(Integer.toString(i));
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    HLogKey key = new HLogKey(b, b, 0, 0, HConstants.DEFAULT_CLUSTER_ID);
    writer.append(new HLog.Entry(key, edit));
    writer.sync();
  }
  writer.close();
  HLog.Reader reader = HLog.getReader(FS, logPath, conf);
  HLog.Entry entry = reader.next();
  assertNotNull(entry);
  Path oldLogPath = new Path(oldLogDir, "log");
  FS.rename(logPath, oldLogPath);
  entry = reader.next();
  assertNotNull(entry);
  entry = reader.next();
  entry = reader.next();
  assertNull(entry);
}

/**
 * Insert a whole batch of entries
 * @throws Exception
 */
@Test
public void testBatchSink() throws Exception {
  HLog.Entry[] entries = new HLog.Entry[BATCH_SIZE];
  for (int i = 0; i < BATCH_SIZE; i++) {
    entries[i] = createEntry(TABLE_NAME1, i, KeyValue.Type.Put);
  }
  SINK.replicateEntries(entries);
  Scan scan = new Scan();
  ResultScanner scanRes = table1.getScanner(scan);
  assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length);
}

private HLog.Entry createEntry(byte[] table, int row, KeyValue.Type type) {
  byte[] fam = Bytes.equals(table, TABLE_NAME1) ? FAM_NAME1 : FAM_NAME2;
  byte[] rowBytes = Bytes.toBytes(row);
  // Just make sure we don't get the same ts for two consecutive rows with
  // the same key
  try {
    Thread.sleep(1);
  } catch (InterruptedException e) {
    LOG.info("Was interrupted while sleeping, meh", e);
  }
  final long now = System.currentTimeMillis();
  KeyValue kv = null;
  if (type.getCode() == KeyValue.Type.Put.getCode()) {
    kv = new KeyValue(rowBytes, fam, fam, now, KeyValue.Type.Put, Bytes.toBytes(row));
  } else if (type.getCode() == KeyValue.Type.DeleteColumn.getCode()) {
    kv = new KeyValue(rowBytes, fam, fam, now, KeyValue.Type.DeleteColumn);
  } else if (type.getCode() == KeyValue.Type.DeleteFamily.getCode()) {
    kv = new KeyValue(rowBytes, fam, null, now, KeyValue.Type.DeleteFamily);
  }
  HLogKey key = new HLogKey(table, table, now, now, HConstants.DEFAULT_CLUSTER_ID);
  WALEdit edit = new WALEdit();
  edit.add(kv);
  return new HLog.Entry(key, edit);
}

private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  boolean needReleaseLock = false;
  if (!this.services.isInitialized()) {
    // During master initialization, we could have multiple places splitting the same wal
    this.splitLogLock.lock();
    needReleaseLock = true;
  }
  try {
    for (ServerName serverName : serverNames) {
      Path logDir = new Path(this.rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
      Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
      // Rename the directory so a rogue RS doesn't create more HLogs
      if (fs.exists(logDir)) {
        if (!this.fs.rename(logDir, splitDir)) {
          throw new IOException("Failed fs.rename for log split: " + logDir);
        }
        logDir = splitDir;
        LOG.debug("Renamed region directory: " + splitDir);
      } else if (!fs.exists(splitDir)) {
        LOG.info("Log dir for server " + serverName + " does not exist");
        continue;
      }
      logDirs.add(splitDir);
    }
  } finally {
    if (needReleaseLock) {
      this.splitLogLock.unlock();
    }
  }
  return logDirs;
}

/**
 * Simple end-to-end test
 * @throws Exception
 */
@Test
public void testWALPlayer() throws Exception {
  final byte[] TABLENAME1 = Bytes.toBytes("testWALPlayer1");
  final byte[] TABLENAME2 = Bytes.toBytes("testWALPlayer2");
  final byte[] FAMILY = Bytes.toBytes("family");
  final byte[] COLUMN1 = Bytes.toBytes("c1");
  final byte[] COLUMN2 = Bytes.toBytes("c2");
  final byte[] ROW = Bytes.toBytes("row");
  HTable t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
  HTable t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);
  // Put a row into the first table
  Put p = new Put(ROW);
  p.add(FAMILY, COLUMN1, COLUMN1);
  p.add(FAMILY, COLUMN2, COLUMN2);
  t1.put(p);
  // Delete one column
  Delete d = new Delete(ROW);
  d.deleteColumns(FAMILY, COLUMN1);
  t1.delete(d);
  // Replay the WAL, map table 1 to table 2
  HLog log = cluster.getRegionServer(0).getWAL();
  log.rollWriter();
  String walInputDir = new Path(cluster.getMaster().getMasterFileSystem().getRootDir(),
    HConstants.HREGION_LOGDIR_NAME).toString();
  WALPlayer player = new WALPlayer(TEST_UTIL.getConfiguration());
  assertEquals(0, player.run(new String[] { walInputDir, Bytes.toString(TABLENAME1),
    Bytes.toString(TABLENAME2) }));
  // Verify the WAL was played into table 2
  Get g = new Get(ROW);
  Result r = t2.get(g);
  assertEquals(1, r.size());
  assertTrue(Bytes.equals(COLUMN2, r.raw()[0].getQualifier()));
}

public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
    final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog)
    throws IOException {
  return createHRegion(info, rootDir, conf, hTableDescriptor, hlog, true);
}

private void createRecoverEdits(final Path tableDir, final Set<String> tableRegions,
    final Set<String> recoverEdits) throws IOException {
  for (String region : tableRegions) {
    Path regionEditsDir = HLog.getRegionDirRecoveredEditsDir(new Path(tableDir, region));
    long seqId = System.currentTimeMillis();
    for (int i = 0; i < 3; ++i) {
      String editName = String.format("%019d", seqId + i);
      recoverEdits.add(editName);
      FSDataOutputStream stream = fs.create(new Path(regionEditsDir, editName));
      stream.write(Bytes.toBytes("test"));
      stream.close();
    }
  }
}

@Override
public void tearDown() throws Exception {
  HLog hlog = r.getLog();
  this.r.close();
  hlog.closeAndDelete();
  super.tearDown();
}

@Override
public HLog getWAL(HRegionInfo regionInfo) throws IOException {
  // TODO: at some point this should delegate to the HLogFactory.
  // Currently, we don't care about the region as much as we care about the
  // table (hence checking the table name below).
  // -ROOT- and hbase:meta regions have a separate WAL.
  if (regionInfo != null && regionInfo.isMetaTable()) {
    return getMetaWAL();
  }
  return this.hlog;
}

@Override
public void setUp() throws Exception {
  // Setup config values necessary for store
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt("hbase.hstore.compaction.min", minFiles);
  this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
  this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
  this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
  this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);
  // Setting up a Store
  Path basedir = new Path(DIR);
  Path logdir = new Path(DIR + "/logs");
  Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);
  fs.delete(logdir, true);
  HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("table"));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  HRegion.closeHRegion(region);
  Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
  region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
  store = new Store(basedir, region, hcd, fs, conf);
  TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
  fs.create(TEST_FILE);
}