/**
 * Initialize this region.
 *
 * @param reporter Tickle every so often if initialize is taking a while.
 * @return What the next sequence (edit) id should be.
 * @throws IOException e
 */
private long initialize(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  long nextSeqId = -1;
  try {
    nextSeqId = initializeRegionInternals(reporter, status);
    return nextSeqId;
  } finally {
    // nextSeqid will be -1 if the initialization fails.
    // At least it will be 0 otherwise.
    if (nextSeqId == -1) {
      status.abort("Exception during region " + getRegionInfo().getRegionNameAsString()
          + " initialization.");
    }
  }
}
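All of the methods collected here drive the same TaskMonitor lifecycle: create a MonitoredTask, update it as the work progresses, then mark it complete or aborted and clean it up. Below is a minimal sketch of that pattern, using only the calls that already appear in these snippets (createStatus, setStatus, markComplete, abort, cleanup); the wrapper class and doRegionWork() are hypothetical scaffolding, not HBase code.

// Minimal sketch of the MonitoredTask lifecycle shared by the methods in this section.
// The wrapper class and doRegionWork() are hypothetical placeholders.
import java.io.IOException;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;

public class MonitoredTaskSketch {
  void runMonitored() throws IOException {
    MonitoredTask status = TaskMonitor.get().createStatus("Doing some region work");
    try {
      status.setStatus("Step 1 of the operation");
      doRegionWork();                             // hypothetical unit of work
      status.markComplete("Done");                // success path, as initializeWarmup does
    } catch (IOException e) {
      status.abort("Failed: " + e.getMessage());  // failure path, as initialize does
      throw e;
    } finally {
      status.cleanup();                           // release the task entry, as close(boolean) does
    }
  }

  private void doRegionWork() throws IOException {
    // hypothetical placeholder for the real operation
  }
}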
/**
 * Close down this HRegion. Flush the cache unless abort parameter is true, Shut down each HStore,
 * don't service any more calls. This method could take some time to execute, so don't call it
 * from a time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return Vector of all the storage files that the HRegion's component HStores make use of. It's
 *         a list of HStoreFile objects. Can be null if we are not to close at this time or we are
 *         already closed.
 * @throws IOException e
 * @throws DroppedSnapshotException Thrown when replay of wal is required because a Snapshot was
 *           not properly persisted. The region is put in closing mode, and the caller MUST abort
 *           after this.
 */
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
  // Only allow one thread to close at a time. Serialize them so dual
  // threads attempting to close will run up against each other.
  MonitoredTask status =
      TaskMonitor.get().createStatus("Closing region " + this + (abort ? " due to abort" : ""));
  status.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, status);
    }
  } finally {
    status.cleanup();
  }
}
private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully, move into the right spot
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path, this, true);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  StoreFile sf = createStoreFileAndReader(dstPath);

  StoreFile.Reader r = sf.getReader();
  this.storeSize += r.length();
  this.totalUncompressedBytes += r.getTotalUncompressedBytes();

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId
        + ", filesize=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1));
  }
  return sf;
}
/**
 * Try becoming active master.
 * @param startupStatus
 * @return True if we could successfully become the active master.
 * @throws InterruptedException
 */
private boolean becomeActiveMaster(MonitoredTask startupStatus) throws InterruptedException {
  // TODO: This is wrong!!!! Should have new servername if we restart ourselves,
  // if we come back to life.
  this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
  this.zooKeeper.registerListener(activeMasterManager);
  stallIfBackupMaster(this.conf, this.activeMasterManager);

  // The ClusterStatusTracker is setup before the other
  // ZKBasedSystemTrackers because it's needed by the activeMasterManager
  // to check if the cluster should be shutdown.
  this.clusterStatusTracker = new ClusterStatusTracker(getZooKeeper(), this);
  this.clusterStatusTracker.start();
  return this.activeMasterManager.blockUntilBecomingActiveMaster(startupStatus,
      this.clusterStatusTracker);
}
/**
 * Initialize this region.
 * @param reporter Tickle every so often if initialize is taking a while.
 * @return What the next sequence (edit) id should be.
 * @throws IOException e
 */
public long initialize(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  long nextSeqId = -1;
  try {
    nextSeqId = initializeRegionInternals(reporter, status);
    return nextSeqId;
  } finally {
    // nextSeqid will be -1 if the initialization fails.
    // At least it will be 0 otherwise.
    if (nextSeqId == -1) {
      status.abort("Exception during region " + this.getRegionNameAsString()
          + " initialization.");
    }
  }
}
/**
 * Close down this HRegion. Flush the cache unless abort parameter is true, Shut down each HStore,
 * don't service any more calls. This method could take some time to execute, so don't call it
 * from a time-sensitive thread.
 * @param abort true if server is aborting (only during testing)
 * @return Vector of all the storage files that the HRegion's component HStores make use of. It's
 *         a list of HStoreFile objects. Can be null if we are not to close at this time or we are
 *         already closed.
 * @throws IOException e
 */
public List<StoreFile> close(final boolean abort) throws IOException {
  // Only allow one thread to close at a time. Serialize them so dual
  // threads attempting to close will run up against each other.
  MonitoredTask status =
      TaskMonitor.get().createStatus("Closing region " + this + (abort ? " due to abort" : ""));
  status.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, status);
    }
  } finally {
    status.cleanup();
  }
}
@Override
public boolean commit(MonitoredTask status) throws IOException {
  if (storeFilePath == null) {
    return false;
  }
  storeFile = Store.this.commitFile(storeFilePath, cacheFlushId, snapshotTimeRangeTracker,
      flushedSize, status);
  if (Store.this.getHRegion().getCoprocessorHost() != null) {
    Store.this.getHRegion().getCoprocessorHost().postFlush(Store.this, storeFile);
  }
  // Add new file to store files. Clear snapshot too while we have
  // the Store write lock.
  return Store.this.updateStorefiles(storeFile, snapshot);
}
public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
    final SnapshotManifest manifest, final HTableDescriptor tableDescriptor, final Path rootDir,
    final ForeignExceptionDispatcher monitor, final MonitoredTask status) {
  this.fs = fs;
  this.conf = conf;
  this.snapshotManifest = manifest;
  this.snapshotDesc = manifest.getSnapshotDescription();
  this.snapshotTable = TableName.valueOf(snapshotDesc.getTable());
  this.tableDesc = tableDescriptor;
  this.rootDir = rootDir;
  this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  this.monitor = monitor;
  this.status = status;
}
/**
 * Turns a snapshot of memstore into a set of store files.
 *
 * @param snapshot {@link PMemStoreSnapshot} snapshot.
 * @param cacheFlushSeqNum Log cache flush sequence number.
 * @param status Task that represents the flush operation and may be updated with status.
 * @return List of files written. Can be empty; must not be null.
 */
@Override
public List<Path> flushSnapshot(PMemStoreSnapshot snapshot, long cacheFlushSeqNum,
    MonitoredTask status) throws IOException {
  if (snapshot.getMutationCount() == 0) {
    return new ArrayList<>();
  }
  ArrayList<Path> result = new ArrayList<Path>();
  Map<String, String> meta = new HashMap<>();
  meta.put(HConstants.START_KEY, Bytes.toString(snapshot.getStartKey()));
  meta.put(HConstants.END_KEY, Bytes.toString(snapshot.getEndKey()));
  PStoreFile.Writer writer = ((HStore) store).createParquetWriter(meta);
  if (writer == null) {
    return result;
  }
  RowScanner scanner = snapshot.getScanner();
  while (scanner.hasNext()) {
    Mutation m = scanner.nextRow();
    writer.append(m);
  }
  writer.close();
  result.add(writer.getFilePath());
  return result;
}
/**
 * Initialize this region.
 *
 * @param reporter Tickle every so often if initialize is taking a while.
 * @return What the next sequence (edit) id should be.
 * @throws IOException e
 */
private long initialize(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  long nextSeqId = -1;
  try {
    nextSeqId = initializeRegionInternals(reporter, status);
    return nextSeqId;
  } finally {
    // nextSeqid will be -1 if the initialization fails.
    // At least it will be 0 otherwise.
    if (nextSeqId == -1) {
      status.abort("Exception during region " + this.getRegionNameAsString()
          + " initialization.");
    }
  }
}
/**
 * Close down this HRegion. Flush the cache unless abort parameter is true,
 * Shut down each HStore, don't service any more calls.
 * <p/>
 * This method could take some time to execute, so don't call it from a
 * time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return Vector of all the storage files that the HRegion's component
 *         HStores make use of. It's a list of HStoreFile objects. Can be null if
 *         we are not to close at this time or we are already closed.
 * @throws IOException e
 */
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
  // Only allow one thread to close at a time. Serialize them so dual
  // threads attempting to close will run up against each other.
  MonitoredTask status = TaskMonitor.get().createStatus(
      "Closing region " + this + (abort ? " due to abort" : ""));
  status.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, status);
    }
  } finally {
    status.cleanup();
  }
}
private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully, move into the right spot
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  StoreFile sf = createStoreFileAndReader(dstPath);

  StoreFile.Reader r = sf.getReader();
  this.storeSize += r.length();
  this.totalUncompressedBytes += r.getTotalUncompressedBytes();

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId
        + ", filesize=" + StringUtils.humanReadableInt(r.length()));
  }
  return sf;
}
/**
 * Try becoming active master.
 * @param startupStatus
 * @return True if we could successfully become the active master.
 * @throws InterruptedException
 */
private boolean becomeActiveMaster(MonitoredTask startupStatus) throws InterruptedException {
  // TODO: This is wrong!!!! Should have new servername if we restart ourselves,
  // if we come back to life.
  this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
  this.zooKeeper.registerListener(activeMasterManager);
  stallIfBackupMaster(this.conf, this.activeMasterManager);

  // The ClusterStatusTracker is setup before the other
  // ZKBasedSystemTrackers because it's needed by the activeMasterManager
  // to check if the cluster should be shutdown.
  this.clusterStatusTracker = new ClusterStatusTracker(getZooKeeper(), this);
  this.clusterStatusTracker.start();
  return this.activeMasterManager.blockUntilBecomingActiveMaster(startupStatus);
}
public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
    final SnapshotDescription snapshotDescription, final Path snapshotDir,
    final HTableDescriptor tableDescriptor, final Path rootDir,
    final ForeignExceptionDispatcher monitor, final MonitoredTask status) {
  this.fs = fs;
  this.conf = conf;
  this.snapshotDesc = snapshotDescription;
  this.snapshotTable = TableName.valueOf(snapshotDescription.getTable());
  this.snapshotDir = snapshotDir;
  this.tableDesc = tableDescriptor;
  this.rootDir = rootDir;
  this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  this.monitor = monitor;
  this.status = status;
}
/**
 * Close down this HRegion. Flush the cache unless abort parameter is true,
 * Shut down each HStore, don't service any more calls.
 *
 * This method could take some time to execute, so don't call it from a
 * time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return Vector of all the storage files that the HRegion's component
 *         HStores make use of. It's a list of HStoreFile objects. Can be null if
 *         we are not to close at this time or we are already closed.
 *
 * @throws IOException e
 */
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
  // Only allow one thread to close at a time. Serialize them so dual
  // threads attempting to close will run up against each other.
  MonitoredTask status = TaskMonitor.get().createStatus(
      "Closing region " + this + (abort ? " due to abort" : ""));
  status.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, status);
    }
  } finally {
    status.cleanup();
  }
}
private StoreFile commitFile(final Path path, final long logCacheFlushId,
    TimeRangeTracker snapshotTimeRangeTracker, AtomicLong flushedSize, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully, move into the right spot
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  StoreFile sf = createStoreFileAndReader(dstPath);

  StoreFile.Reader r = sf.getReader();
  this.storeSize += r.length();
  this.totalUncompressedBytes += r.getTotalUncompressedBytes();

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId
        + ", filesize=" + StringUtils.humanReadableInt(r.length()));
  }
  return sf;
}
/**
 * Initialize the restore helper, based on the snapshot and table information provided.
 */
private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
    final String sourceTableName, final HTableDescriptor htdClone) throws IOException {
  CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
  HTableDescriptor tableDescriptor = Mockito.mock(HTableDescriptor.class);
  ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
  MonitoredTask status = Mockito.mock(MonitoredTask.class);

  SnapshotDescription sd = SnapshotDescription.newBuilder()
      .setName("snapshot")
      .setTable(sourceTableName)
      .build();

  return new RestoreSnapshotHelper(conf, fs, sd, snapshotDir, htdClone, rootDir, monitor, status);
}
/**
 * Initialize this region.
 *
 * @param reporter Tickle every so often if initialize is taking a while.
 * @return What the next sequence (edit) id should be.
 * @throws IOException e
 */
public long initialize(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus(
      "Initializing region " + this);
  long nextSeqId = -1;
  try {
    nextSeqId = initializeRegionInternals(reporter, status);
    return nextSeqId;
  } finally {
    // nextSeqid will be -1 if the initialization fails.
    // At least it will be 0 otherwise.
    if (nextSeqId == -1) {
      status.abort("Exception during region " + this.getRegionNameAsString()
          + " initialization.");
    }
  }
}
/**
 * Close down this HRegion. Flush the cache unless abort parameter is true,
 * Shut down each HStore, don't service any more calls.
 *
 * This method could take some time to execute, so don't call it from a
 * time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return Vector of all the storage files that the HRegion's component
 *         HStores make use of. It's a list of HStoreFile objects. Can be null if
 *         we are not to close at this time or we are already closed.
 *
 * @throws IOException e
 */
public List<StoreFile> close(final boolean abort) throws IOException {
  // Only allow one thread to close at a time. Serialize them so dual
  // threads attempting to close will run up against each other.
  MonitoredTask status = TaskMonitor.get().createStatus(
      "Closing region " + this + (abort ? " due to abort" : ""));
  status.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, status);
    }
  } finally {
    status.cleanup();
  }
}
@Override
public boolean commit(MonitoredTask status) throws IOException {
  if (storeFilePath == null) {
    return false;
  }
  storeFile = Store.this.commitFile(storeFilePath, cacheFlushId, snapshotTimeRangeTracker,
      flushedSize, status);
  if (Store.this.getHRegion().getCoprocessorHost() != null) {
    Store.this.getHRegion()
        .getCoprocessorHost()
        .postFlush(Store.this, storeFile);
  }
  // Add new file to store files. Clear snapshot too while we have
  // the Store write lock.
  return Store.this.updateStorefiles(storeFile, snapshot);
}
public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
    final SnapshotManifest manifest, final TableDescriptor tableDescriptor, final Path rootDir,
    final ForeignExceptionDispatcher monitor, final MonitoredTask status,
    final boolean createBackRefs) {
  this.fs = fs;
  this.conf = conf;
  this.snapshotManifest = manifest;
  this.snapshotDesc = manifest.getSnapshotDescription();
  this.snapshotTable = TableName.valueOf(snapshotDesc.getTable());
  this.tableDesc = tableDescriptor;
  this.rootDir = rootDir;
  this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  this.monitor = monitor;
  this.status = status;
  this.createBackRefs = createBackRefs;
}
private void waitTasks(TaskBatch batch, MonitoredTask status) {
  synchronized (batch) {
    while ((batch.done + batch.error) != batch.installed) {
      try {
        status.setStatus("Waiting for distributed tasks to finish. " + " scheduled="
            + batch.installed + " done=" + batch.done + " error=" + batch.error);
        batch.wait(100);
        if (stopper.isStopped()) {
          LOG.warn("Stopped while waiting for log splits to be completed");
          return;
        }
      } catch (InterruptedException e) {
        LOG.warn("Interrupted while waiting for log splits to be completed");
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
private void waitForSplittingCompletion(TaskBatch batch, MonitoredTask status) {
  synchronized (batch) {
    while ((batch.done + batch.error) != batch.installed) {
      try {
        status.setStatus("Waiting for distributed tasks to finish. " + " scheduled="
            + batch.installed + " done=" + batch.done + " error=" + batch.error);
        int remaining = batch.installed - (batch.done + batch.error);
        int actual = activeTasks(batch);
        if (remaining != actual) {
          LOG.warn("Expected " + remaining + " active tasks, but actually there are " + actual);
        }
        int remainingTasks = ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
            .getSplitLogManagerCoordination().remainingTasksInCoordination();
        if (remainingTasks >= 0 && actual > remainingTasks) {
          LOG.warn("Expected at least " + actual + " tasks remaining, but actually there are "
              + remainingTasks);
        }
        if (remainingTasks == 0 || actual == 0) {
          LOG.warn("No more task remaining, splitting "
              + "should have completed. Remaining tasks is " + remainingTasks
              + ", active tasks in map " + actual);
          if (remainingTasks == 0 && actual == 0) {
            return;
          }
        }
        batch.wait(100);
        if (stopper.isStopped()) {
          LOG.warn("Stopped while waiting for log splits to be completed");
          return;
        }
      } catch (InterruptedException e) {
        LOG.warn("Interrupted while waiting for log splits to be completed");
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
    final SnapshotManifest manifest, final HTableDescriptor tableDescriptor, final Path rootDir,
    final ForeignExceptionDispatcher monitor, final MonitoredTask status) {
  this(conf, fs, manifest, tableDescriptor, rootDir, monitor, status, true);
}
public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
    final SnapshotManifest manifest, final HTableDescriptor tableDescriptor, final Path rootDir,
    final ForeignExceptionDispatcher monitor, final MonitoredTask status,
    final boolean createBackRefs) {
  this.fs = fs;
  this.conf = conf;
  this.snapshotManifest = manifest;
  this.snapshotDesc = manifest.getSnapshotDescription();
  this.snapshotTable = TableName.valueOf(snapshotDesc.getTable());
  this.tableDesc = tableDescriptor;
  this.rootDir = rootDir;
  this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  this.monitor = monitor;
  this.status = status;
  this.createBackRefs = createBackRefs;
}
/**
 * Copy the snapshot files for a snapshot scanner, discards meta changes.
 * @param conf
 * @param fs
 * @param rootDir
 * @param restoreDir
 * @param snapshotName
 * @throws IOException
 */
public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs,
    Path rootDir, Path restoreDir, String snapshotName) throws IOException {
  // ensure that restore dir is not under root dir
  if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) {
    throw new IllegalArgumentException("Filesystems for restore directory and HBase root "
        + "directory should be the same");
  }
  if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath())) {
    throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase "
        + "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir);
  }

  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);

  MonitoredTask status = TaskMonitor.get().createStatus(
      "Restoring snapshot '" + snapshotName + "' to directory " + restoreDir);
  ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();

  // we send createBackRefs=false so that restored hfiles do not create back reference links
  // in the base hbase root dir.
  RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, manifest,
      manifest.getTableDescriptor(), restoreDir, monitor, status, false);
  RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize.

  if (LOG.isDebugEnabled()) {
    LOG.debug("Restored table dir:" + restoreDir);
    FSUtils.logFileSystemState(fs, restoreDir, LOG);
  }
  return metaChanges;
}
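A possible caller of copySnapshotForScanner is sketched below; it is not taken from the source above. The snapshot name, restore path, and wrapper class are hypothetical, and FSUtils.getRootDir / HBaseConfiguration.create are assumed to be available in this HBase version.

// Hedged usage sketch: restore a snapshot's files into a throwaway directory so they can be
// scanned without touching the live table. Names below are hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.FSUtils;

public class SnapshotScannerRestoreSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = FSUtils.getRootDir(conf);                   // assumed helper for hbase.rootdir
    Path restoreDir = new Path("/tmp/hbase-snapshot-restore"); // hypothetical scratch location
    RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
        RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "my_snapshot");
    // metaChanges describes the restored regions; since createBackRefs=false is passed
    // internally, no back-reference links are created under the live root directory.
    System.out.println("Restored snapshot regions for scanning under " + restoreDir);
  }
}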
private void initializeWarmup(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  // Initialize all the HStores
  status.setStatus("Warming up all the Stores");
  try {
    initializeStores(reporter, status);
  } finally {
    status.markComplete("Done warming up.");
  }
}
protected void finalizeWriter(StoreFile.Writer writer, long cacheFlushSeqNum,
    MonitoredTask status) throws IOException {
  // Write out the log sequence number that corresponds to this output
  // hfile. Also write current time in metadata as minFlushTime.
  // The hfile is current up to and including cacheFlushSeqNum.
  status.setStatus("Flushing " + store + ": appending metadata");
  writer.appendMetadata(cacheFlushSeqNum, false);
  status.setStatus("Flushing " + store + ": closing flushed file");
  writer.close();
}