Java class org.apache.hadoop.hbase.HConstants usage examples

Project: ditb    File: DefaultWALProvider.java
/**
 * Returns the region server name from a WAL file path that is in one of the following
 * formats:
 * <ul>
 *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;-splitting/...</li>
 *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;/...</li>
 * </ul>
 * @param logFile path of a WAL file, or of the WAL directory itself
 * @return the parsed server name, or null if the passed in logFile isn't a valid WAL file path
 */
public static ServerName getServerNameFromWALDirectoryName(Path logFile) {
  String logDirName = logFile.getParent().getName();
  // We were passed the directory and not a file in it.
  if (logDirName.equals(HConstants.HREGION_LOGDIR_NAME)) {
    logDirName = logFile.getName();
  }
  ServerName serverName = null;
  if (logDirName.endsWith(SPLITTING_EXT)) {
    logDirName = logDirName.substring(0, logDirName.length() - SPLITTING_EXT.length());
  }
  try {
    serverName = ServerName.parseServerName(logDirName);
  } catch (IllegalArgumentException ex) {
    serverName = null;
    LOG.warn("Cannot parse a server name from path=" + logFile + "; " + ex.getMessage());
  }
  if (serverName != null && serverName.getStartcode() < 0) {
    LOG.warn("Invalid log file path=" + logFile);
    serverName = null;
  }
  return serverName;
}
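For reference, a minimal usage sketch (the HDFS path, host and timestamps below are made up):

// Hypothetical WAL path; the parent directory name encodes "<host>,<port>,<startcode>".
Path walFile = new Path(
    "hdfs://namenode/hbase/.logs/host1.example.com,16020,1473454321000/wal.1473454400000");
ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(walFile);
// sn.getHostname() == "host1.example.com", sn.getPort() == 16020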
Project: ditb    File: TestAssignmentManagerOnCluster.java
static void setupOnce() throws Exception {
  // Use our load balancer to control region plans
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    MyLoadBalancer.class, LoadBalancer.class);
  conf.setClass(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    MyRegionObserver.class, RegionObserver.class);
  // Reduce the maximum attempts to speed up the test
  conf.setInt("hbase.assignment.maximum.attempts", 3);
  // Put meta on master to avoid meta server shutdown handling
  conf.set("hbase.balancer.tablesOnMaster", "hbase:meta");
  conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
  conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);

  TEST_UTIL.startMiniCluster(1, 4, null, MyMaster.class, MyRegionServer.class);
  admin = TEST_UTIL.getHBaseAdmin();
}
Project: ditb    File: TestMetaWithReplicas.java
@Test
public void testHBaseFsckWithFewerMetaReplicaZnodes() throws Exception {
  ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection(
      TEST_UTIL.getConfiguration());
  RegionLocations rl = c.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
      false, false);
  HBaseFsckRepair.closeRegionSilentlyAndWait(c,
      rl.getRegionLocation(2).getServerName(), rl.getRegionLocation(2).getRegionInfo());
  ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(2));
  // check that problem exists
  HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION });
  // fix the problem
  hbck = doFsck(TEST_UTIL.getConfiguration(), true);
  // run hbck again to make sure we don't see any errors
  hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{});
}
Project: ditb    File: TestCoprocessorInterface.java
Configuration initSplit() {
  // Always compact if there is more than one store file.
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 2);
  // Make lease timeout longer, lease checks less frequent
  TEST_UTIL.getConfiguration().setInt(
      "hbase.master.lease.thread.wakefrequency", 5 * 1000);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 10 * 1000);
  // Increase the amount of time between client retries
  TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000);
  // This size should make it so we always split using the addContent
  // below.  After adding all data, the first region is 1.3M
  TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE,
      1024 * 128);
  TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster",
      true);
  TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);

  return TEST_UTIL.getConfiguration();
}
Project: ditb    File: TestFSHLog.java
protected void addEdits(WAL log,
                        HRegionInfo hri,
                        HTableDescriptor htd,
                        int times,
                        MultiVersionConcurrencyControl mvcc)
    throws IOException {
  final byte[] row = Bytes.toBytes("row");
  for (int i = 0; i < times; i++) {
    long timestamp = System.currentTimeMillis();
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, row, row, timestamp, row));
    WALKey key = new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(),
        WALKey.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE,
        HConstants.NO_NONCE, mvcc);
    log.append(htd, hri, key, cols, true);
  }
  log.sync();
}
Project: ditb    File: HRegionServerCommandLine.java
private int start() throws Exception {
  Configuration conf = getConf();
  HRegionServer.loadWinterConf(conf, null);
  CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
  try {
    // If 'local', don't start a region server here. Defer to
    // LocalHBaseCluster. It manages 'local' clusters.
    if (LocalHBaseCluster.isLocal(conf)) {
      LOG.warn("Not starting a distinct region server because " + HConstants.CLUSTER_DISTRIBUTED
          + " is false");
    } else {
      logProcessInfo(getConf());
      HRegionServer hrs = HRegionServer.constructRegionServer(regionServerClass, conf, cp);
      hrs.start();
      hrs.join();
      if (hrs.isAborted()) {
        throw new RuntimeException("HRegionServer Aborted");
      }
    }
  } catch (Throwable t) {
    LOG.error("Region server exiting", t);
    return 1;
  }
  return 0;
}
Project: ditb    File: TestRSKilledWhenInitializing.java
@Override
protected void handleReportForDutyResponse(RegionServerStartupResponse c) throws IOException {
  if (firstRS.getAndSet(false)) {
    InetSocketAddress address = super.getRpcServer().getListenerAddress();
    if (address == null) {
      throw new IOException("Listener channel is closed");
    }
    for (NameStringPair e : c.getMapEntriesList()) {
      String key = e.getName();
      // The hostname the master sees us as.
      if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) {
        String hostnameFromMasterPOV = e.getValue();
        assertEquals(address.getHostName(), hostnameFromMasterPOV);
      }
    }
    while (!masterActive) {
      Threads.sleep(100);
    }
    super.kill();
  } else {
    super.handleReportForDutyResponse(c);
  }
}
Project: ditb    File: SecureTestUtil.java
/**
 * Revoke permissions globally from the given user. Will wait until all active
 * AccessController instances have updated their permissions caches or will
 * throw an exception upon timeout (10 seconds).
 */
public static void revokeGlobal(final HBaseTestingUtility util, final String user,
    final Permission.Action... actions) throws Exception {
  SecureTestUtil.updateACLs(util, new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        try (Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME)) {
          BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_START_ROW);
          AccessControlService.BlockingInterface protocol =
              AccessControlService.newBlockingStub(service);
          ProtobufUtil.revoke(null, protocol, user, actions);
        }
      }
      return null;
    }
  });
}
Project: ditb    File: ScanQueryMatcher.java
public boolean moreRowsMayExistAfter(Cell kv) {
  if (this.isReversed) {
    // For a reversed scan the stop row is a lower bound: more rows can only
    // exist while the current row is still strictly greater than the stop row.
    return rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
        kv.getRowLength(), stopRow, 0, stopRow.length) > 0;
  }
  if (!Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) &&
      rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
          kv.getRowLength(), stopRow, 0, stopRow.length) >= 0) {
    // KV >= STOPROW, so there is nothing left.
    return false;
  }
  return true;
}
Project: ditb    File: HFileBlock.java
/**
 * @param dataBlockEncoder data block encoding algorithm to use
 * @param fileContext HFile-level settings (checksum type, bytes per checksum, etc.)
 */
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
      HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  dataBlockEncodingCtx = this.dataBlockEncoder
      .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);

  if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
        " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
        fileContext.getBytesPerChecksum());
  }

  baosInMemory = new ByteArrayOutputStream();

  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i) {
    prevOffsetByType[i] = -1;
  }

  this.fileContext = fileContext;
}
Project: ditb    File: HFileBlock.java
/**
 * Starts writing into the block. The previous block's data is discarded.
 *
 * @return the stream the user can write their data into
 * @throws IOException
 */
public DataOutputStream startWriting(BlockType newBlockType)
    throws IOException {
  if (state == State.BLOCK_READY && startOffset != -1) {
    // We had a previous block that was written to a stream at a specific
    // offset. Save that offset as the last offset of a block of that type.
    prevOffsetByType[blockType.getId()] = startOffset;
  }

  startOffset = -1;
  blockType = newBlockType;

  baosInMemory.reset();
  baosInMemory.write(HConstants.HFILEBLOCK_DUMMY_HEADER);

  state = State.WRITING;

  // We will compress it later in finishBlock()
  userDataStream = new DataOutputStream(baosInMemory);
  if (newBlockType == BlockType.DATA) {
    this.dataBlockEncoder.startBlockEncoding(dataBlockEncodingCtx, userDataStream);
  }
  this.unencodedDataSizeWritten = 0;
  return userDataStream;
}
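A rough sketch of the expected call sequence; writeHeaderAndData is recalled from the surrounding HFileBlock.Writer class and may differ across versions:

DataOutputStream out = writer.startWriting(BlockType.DATA);
// ... serialize cells into `out` ...
writer.writeHeaderAndData(fsOut); // finishes, encodes/compresses and flushes the block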
Project: ditb    File: TestZKBasedOpenCloseRegion.java
/**
 * If region open fails with an IOException in openRegion() while doing tableDescriptors.get(),
 * the region should not be added to the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  TableDescriptors htd = Mockito.mock(TableDescriptors.class);
  Object originalState = Whitebox.getInternalState(regionServer, "tableDescriptors");
  Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
  Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
  try {
    ProtobufUtil.openRegion(null, regionServer.getRSRpcServices(),
      regionServer.getServerName(), REGIONINFO);
    fail("It should throw IOException ");
  } catch (IOException e) {
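    // Expected: the injected IOException from tableDescriptors.get() propagates here.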
  }
  Whitebox.setInternalState(regionServer, "tableDescriptors", orizinalState);
  assertFalse("Region should not be in RIT",
      regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
Project: ditb    File: BaseRowProcessorEndpoint.java
/**
 * Pass a processor to the region to process multiple rows atomically.
 *
 * The RowProcessor implementations should be the inner classes of your
 * RowProcessorEndpoint. This way the RowProcessor can be class-loaded with
 * the Coprocessor endpoint together.
 *
 * See {@code TestRowProcessorEndpoint} for an example.
 *
 * The request contains information for constructing the processor
 * (see {@link #constructRowProcessorFromRequest}). The processor object defines
 * the read-modify-write procedure.
 */
@Override
public void process(RpcController controller, ProcessRequest request,
    RpcCallback<ProcessResponse> done) {
  ProcessResponse resultProto = null;
  try {
    RowProcessor<S,T> processor = constructRowProcessorFromRequest(request);
    Region region = env.getRegion();
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE;
    region.processRowsWithLocks(processor, nonceGroup, nonce);
    T result = processor.getResult();
    ProcessResponse.Builder b = ProcessResponse.newBuilder();
    b.setRowProcessorResult(result.toByteString());
    resultProto = b.build();
  } catch (Exception e) {
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(resultProto);
}
Project: ditb    File: DefaultVisibilityLabelServiceImpl.java
/**
 * Adds the mutations to the labels region and sets the results into finalOpStatus.
 * finalOpStatus may already contain entries whose OpStatus is FAILURE; those are left
 * as-is and the remaining entries are filled in, in order.
 * @param mutations
 * @param finalOpStatus
 * @return whether we need a ZK update or not.
 */
private boolean mutateLabelsRegion(List<Mutation> mutations, OperationStatus[] finalOpStatus)
    throws IOException {
  OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations
    .toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
  int i = 0;
  boolean updateZk = false;
  for (OperationStatus status : opStatus) {
    // Update the zk when at least one of the mutations was added successfully.
    updateZk = updateZk || (status.getOperationStatusCode() == OperationStatusCode.SUCCESS);
    for (; i < finalOpStatus.length; i++) {
      if (finalOpStatus[i] == null) {
        finalOpStatus[i] = status;
        break;
      }
    }
  }
  return updateZk;
}
Project: ditb    File: ProcessBasedLocalHBaseCluster.java
/**
 * Constructor. Modifies the passed configuration.
 * @param hbaseHome the top directory of the HBase source tree
 */
public ProcessBasedLocalHBaseCluster(Configuration conf,
    int numDataNodes, int numRegionServers) {
  this.conf = conf;
  this.hbaseHome = HBaseHomePath.getHomePath();
  this.numMasters = 1;
  this.numRegionServers = numRegionServers;
  this.workDir = hbaseHome + "/target/local_cluster";
  this.numDataNodes = numDataNodes;

  hbaseDaemonScript = hbaseHome + "/bin/hbase-daemon.sh";
  zkClientPort = HBaseTestingUtility.randomFreePort();

  this.rsPorts = sortedPorts(numRegionServers);
  this.masterPorts = sortedPorts(numMasters);

  conf.set(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
  conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
}
Project: ditb    File: TokenUtil.java
/**
 * Obtain and return an authentication token for the current user.
 * @param conn The HBase cluster connection
 * @return the authentication token instance
 */
public static Token<AuthenticationTokenIdentifier> obtainToken(
    Connection conn) throws IOException {
  Table meta = null;
  try {
    meta = conn.getTable(TableName.META_TABLE_NAME);
    CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface service =
        AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
    AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
        AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());

    return ProtobufUtil.toToken(response.getToken());
  } catch (ServiceException se) {
    throw ProtobufUtil.toIOException(se);
  } finally {
    if (meta != null) {
      meta.close();
    }
  }
}
Project: ditb    File: TestCoprocessorScanPolicy.java
@Override
public InternalScanner preFlushScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  if (newTtl != null) {
    System.out.println("PreFlush:" + newTtl);
  }
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
      family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
Project: ditb    File: HBaseAdmin.java
/**
 * Stop the designated regionserver
 * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
 * <code>example.org:1234</code>
 * @throws IOException if a remote or network exception occurs
 */
@Override
public synchronized void stopRegionServer(final String hostnamePort)
throws IOException {
  String hostname = Addressing.parseHostname(hostnamePort);
  int port = Addressing.parsePort(hostnamePort);
  AdminService.BlockingInterface admin =
    this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
  StopServerRequest request = RequestConverter.buildStopServerRequest(
    "Called by admin client " + this.connection.toString());
  PayloadCarryingRpcController controller = rpcControllerFactory.newController();

  controller.setPriority(HConstants.HIGH_QOS);
  try {
    // TODO: this does not do retries, it should. Set priority and timeout in controller
    admin.stopServer(controller, request);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
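For example (host and port are hypothetical):

admin.stopRegionServer("rs1.example.org:16020");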
Project: ditb    File: TestZKBasedOpenCloseRegion.java
private static void waitUntilAllRegionsAssigned()
throws IOException {
  HTable meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  while (true) {
    int rows = 0;
    Scan scan = new Scan();
    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
    ResultScanner s = meta.getScanner(scan);
    for (Result r = null; (r = s.next()) != null;) {
      byte [] b =
        r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      if (b == null || b.length <= 0) {
        break;
      }
      rows++;
    }
    s.close();
    // If I get to here and all rows have a Server, then all have been assigned.
    if (rows >= countOfRegions) {
      break;
    }
    LOG.info("Found=" + rows);
    Threads.sleep(1000);
  }
  meta.close();
}
Project: ditb    File: TestQuotaThrottle.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.setTEST_FORCE_REFRESH(true);

  tables = new HTable[TABLE_NAMES.length];
  for (int i = 0; i < TABLE_NAMES.length; ++i) {
    tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
  }
}
Project: ditb    File: ServerNonceManager.java
/**
 * Starts the operation if operation with such nonce has not already succeeded. If the
 * operation is in progress, waits for it to end and checks whether it has succeeded.
 * @param group Nonce group.
 * @param nonce Nonce.
 * @param stoppable Stoppable that terminates waiting (if any) when the server is stopped.
 * @return true if the operation has not already succeeded and can proceed; false otherwise.
 */
public boolean startOperation(long group, long nonce, Stoppable stoppable)
    throws InterruptedException {
  if (nonce == HConstants.NO_NONCE) return true;
  NonceKey nk = new NonceKey(group, nonce);
  OperationContext ctx = new OperationContext();
  while (true) {
    OperationContext oldResult = nonces.putIfAbsent(nk, ctx);
    if (oldResult == null) return true;

    // Collision with some operation - should be extremely rare.
    synchronized (oldResult) {
      int oldState = oldResult.getState();
      LOG.debug("Conflict detected by nonce: " + nk + ", " + oldResult);
      if (oldState != OperationContext.WAIT) {
        return oldState == OperationContext.PROCEED; // operation ended
      }
      oldResult.setHasWait();
      oldResult.wait(this.conflictWaitIterationMs); // operation is still active... wait and loop
      if (stoppable.isStopped()) {
        throw new InterruptedException("Server stopped");
      }
    }
  }
}
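A sketch of the intended call pattern around a mutation; endOperation(group, nonce, success) is the companion method of this class, named here from memory and assumed for illustration:

if (nonceManager.startOperation(nonceGroup, nonce, server)) {
  boolean success = false;
  try {
    // ... apply the mutation exactly once ...
    success = true;
  } finally {
    // Record the outcome so a later retry with the same nonce sees it.
    nonceManager.endOperation(nonceGroup, nonce, success);
  }
}
// A false return means an operation with this nonce already succeeded;
// report success to the client without re-applying it.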
Project: ditb    File: SequenceIdAccounting.java
/**
 * Iterates over the given Map and compares sequence ids with corresponding
 * entries in {@link #lowestUnflushedSequenceIds}. If a region in
 * {@link #lowestUnflushedSequenceIds} has a sequence id less than or equal to that passed
 * in <code>sequenceids</code> then return it.
 * @param sequenceids Sequenceids keyed by encoded region name.
 * @return regions found in this instance with sequence ids less than those passed in.
 */
byte[][] findLower(Map<byte[], Long> sequenceids) {
  List<byte[]> toFlush = null;
  // Keeping the old behavior of iterating unflushedSeqNums under oldestSeqNumsLock.
  synchronized (tieLock) {
    for (Map.Entry<byte[], Long> e: sequenceids.entrySet()) {
      Map<byte[], Long> m = this.lowestUnflushedSequenceIds.get(e.getKey());
      if (m == null) continue;
      // The lowest sequence id outstanding for this region.
      long lowest = getLowestSequenceId(m);
      if (lowest != HConstants.NO_SEQNUM && lowest <= e.getValue()) {
        if (toFlush == null) toFlush = new ArrayList<byte[]>();
        toFlush.add(e.getKey());
      }
    }
  }
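  // The single-element array argument below is only a type witness for toArray();
  // a new array is allocated whenever more than one region qualifies.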
  return toFlush == null ? null : toFlush.toArray(new byte[][] { HConstants.EMPTY_BYTE_ARRAY });
}
Project: ditb    File: ZKServerTool.java
public static ServerName[] readZKNodes(Configuration conf) {
  List<ServerName> hosts = new LinkedList<ServerName>();

  // Note that we do not simply grab the property
  // HConstants.ZOOKEEPER_QUORUM from the HBaseConfiguration because the
  // user may be using a zoo.cfg file.
  Properties zkProps = ZKConfig.makeZKProps(conf);
  for (Entry<Object, Object> entry : zkProps.entrySet()) {
    String key = entry.getKey().toString().trim();
    String value = entry.getValue().toString().trim();
    if (key.startsWith("server.")) {
      String[] parts = value.split(":");
      String host = parts[0];

      int port = HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT;
      if (parts.length > 1) {
        port = Integer.parseInt(parts[1]);
      }
      hosts.add(ServerName.valueOf(host, port, -1));
    }
  }
  return hosts.toArray(new ServerName[hosts.size()]);
}
Project: ditb    File: TestFSUtils.java
@Test
public void testVersion() throws DeserializationException, IOException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  final FileSystem fs = htu.getTestFileSystem();
  final Path rootdir = htu.getDataTestDir();
  assertNull(FSUtils.getVersion(fs, rootdir));
  // Write out old format version file.  See if we can read it in and convert.
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FSDataOutputStream s = fs.create(versionFile);
  final String version = HConstants.FILE_SYSTEM_VERSION;
  s.writeUTF(version);
  s.close();
  assertTrue(fs.exists(versionFile));
  FileStatus [] status = fs.listStatus(versionFile);
  assertNotNull(status);
  assertTrue(status.length > 0);
  String newVersion = FSUtils.getVersion(fs, rootdir);
  assertEquals(version.length(), newVersion.length());
  assertEquals(version, newVersion);
  // File will have been converted. Exercise the pb format
  assertEquals(version, FSUtils.getVersion(fs, rootdir));
  FSUtils.checkVersion(fs, rootdir, true);
}
Project: ditb    File: TestHRegion.java
private Configuration initSplit() {
  // Always compact if there is more than one store file.
  CONF.setInt("hbase.hstore.compactionThreshold", 2);

  // Make lease timeout longer, lease checks less frequent
  CONF.setInt("hbase.master.lease.thread.wakefrequency", 5 * 1000);

  CONF.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 10 * 1000);

  // Increase the amount of time between client retries
  CONF.setLong("hbase.client.pause", 15 * 1000);

  // This size should make it so we always split using the addContent
  // below. After adding all data, the first region is 1.3M
  CONF.setLong(HConstants.HREGION_MAX_FILESIZE, 1024 * 128);
  return CONF;
}
Project: ditb    File: TestVisibilityLabelsWithCustomVisLabService.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // setup configuration
  conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
  VisibilityTestUtil.enableVisiblityLabels(conf);
  conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class,
      ScanLabelGenerator.class);
  conf.setClass(VisibilityLabelServiceManager.VISIBILITY_LABEL_SERVICE_CLASS,
      ExpAsStringVisibilityLabelServiceImpl.class, VisibilityLabelService.class);
  conf.set("hbase.superuser", "admin");
  TEST_UTIL.startMiniCluster(2);
  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });

  // Wait for the labels table to become available
  TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
  addLabels();
}
Project: ditb    File: TestLeaseRenewal.java
/**
 * @throws java.lang.Exception
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
    leaseTimeout);
  TEST_UTIL.startMiniCluster();
}
Project: ditb    File: StripeStoreFileManager.java
/**
 * Finds the stripe index for the stripe containing a row provided externally for get/scan.
 */
private final int findStripeForRow(byte[] row, boolean isStart) {
  if (isStart && row == HConstants.EMPTY_START_ROW) return 0;
  if (!isStart && row == HConstants.EMPTY_END_ROW) return state.stripeFiles.size() - 1;
  // If there's an exact match below, a stripe ends at "row". Stripe right boundary is
  // exclusive, so that means the row is in the next stripe; thus, we need to add one to index.
  // If there's no match, the return value of binarySearch is (-(insertion point) - 1), where
  // insertion point is the index of the next greater element, or list size if none. The
  // insertion point happens to be exactly what we need, so we need to add one to the result.
  return Math.abs(Arrays.binarySearch(state.stripeEndRows, row, Bytes.BYTES_COMPARATOR) + 1);
}
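To make the binarySearch arithmetic above concrete, a toy example (stripe boundaries are made up):

// Two stripe end rows => three stripes: (-inf,"b"), ["b","d"), ["d",+inf).
byte[][] stripeEndRows = { Bytes.toBytes("b"), Bytes.toBytes("d") };
// Row "c": not found, binarySearch returns -(insertion point 1) - 1 = -2; abs(-2 + 1) = 1.
// Row "b": exact match at index 0; abs(0 + 1) = 1, i.e. "b" opens the second stripe.
int idx = Math.abs(Arrays.binarySearch(stripeEndRows, Bytes.toBytes("c"),
    Bytes.BYTES_COMPARATOR) + 1);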
Project: ditb    File: TestReplicationAdmin.java
/**
 * @throws java.lang.Exception
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  admin = new ReplicationAdmin(conf);
}
Project: ditb    File: HMaster.java
public int getRegionServerInfoPort(final ServerName sn) {
  RegionServerInfo info = this.regionServerTracker.getRegionServerInfo(sn);
  if (info == null || info.getInfoPort() == 0) {
    return conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
      HConstants.DEFAULT_REGIONSERVER_INFOPORT);
  }
  return info.getInfoPort();
}
Project: ditb    File: RpcRetryingCallerFactory.java
public RpcRetryingCallerFactory(Configuration conf, RetryingCallerInterceptor interceptor) {
  this.conf = conf;
  pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY,
      AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
  this.interceptor = interceptor;
  enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
}
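The values read above come from standard client configuration; a caller might tune them like this (the numbers are arbitrary, and `interceptor` is assumed to be in scope):

Configuration conf = HBaseConfiguration.create();
conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 200);        // ms between retries
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10); // max retry count
RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf, interceptor);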
Project: ditb    File: ScanQueryMatcher.java
/**
 * @param nextIndexed the key of the next entry in the block index (if any)
 * @param kv The Cell we're using to calculate the seek key
 * @return result of the compare between the indexed key and the key portion of the passed cell
 */
public int compareKeyForNextRow(Cell nextIndexed, Cell kv) {
  return rowComparator.compareKey(nextIndexed,
    kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
    null, 0, 0,
    null, 0, 0,
    HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
}
Project: ditb    File: NoOpScanPolicyObserver.java
/**
 * Reimplement the default behavior
 */
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  ScanInfo oldSI = store.getScanInfo();
  ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getFamily(), oldSI.getTtl(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(oldSI.getMaxVersions());
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
Project: ditb    File: TestLogRollAbort.java
/**
 * Tests the case where a RegionServer enters a GC pause and comes back online
 * after the master declared it dead and started to split its logs. Log rolling
 * after the master-side split starts should then fail. See HBASE-2312.
 */
@Test (timeout=300000)
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = "testLogRollAfterSplitStart";
  Path thisTestsDir = new Path(HBASEDIR, DefaultWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, null, logName);

  try {
    // put some entries in a WAL
    TableName tableName =
        TableName.valueOf(this.getClass().getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(regioninfo.getEncodedNameAsBytes());
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    final int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(htd, regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), mvcc), kvs, true);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((FSHLog) log).replaceWriter(((FSHLog)log).getOldPath(), null, null, null);

    /* Code taken from MasterFileSystem.getLogDirs(), which is called from
     * MasterFileSystem.splitLog() and handles RS shutdowns (as observed by the
     * splitting process).
     */
    // rename the directory so a rogue RS doesn't create more WALs
    Path rsSplitDir = thisTestsDir.suffix(DefaultWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);

    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASEDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);

    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}
Project: ditb    File: TestLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,"");
  util.getConfiguration().setInt(
    LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
    MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
      KeyValueCodecWithTags.class.getCanonicalName());
  util.startMiniCluster();

  setupNamespace();
}
Project: ditb    File: TestLRUDictionary.java
/**
 * Assert can't add empty array.
 */
@Test
public void testPassingEmptyArrayToFindEntry() {
  assertEquals(Dictionary.NOT_IN_DICTIONARY,
    testee.findEntry(HConstants.EMPTY_BYTE_ARRAY, 0, 0));
  assertEquals(Dictionary.NOT_IN_DICTIONARY,
    testee.addEntry(HConstants.EMPTY_BYTE_ARRAY, 0, 0));
}
Project: ditb    File: FSTableDescriptors.java
/**
 * Get the current table descriptor for the given table, or null if none exists.
 *
 * Uses a local cache of the descriptor but still checks the filesystem on each call
 * to see if a newer file has been created since the cached one was read.
 */
@Override
public HTableDescriptor get(final TableName tablename)
throws IOException {
  invocations++;
  if (TableName.META_TABLE_NAME.equals(tablename)) {
    cachehits++;
    return metaTableDescriptor;
  }
  // hbase:meta is already handled. If someone tries to get the descriptor for
  // .logs, .oldlogs or .corrupt, throw an exception.
  if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
     throw new IOException("No descriptor found for non table = " + tablename);
  }

  if (usecache) {
    // Look in cache of descriptors.
    HTableDescriptor cachedtdm = this.cache.get(tablename);
    if (cachedtdm != null) {
      cachehits++;
      return cachedtdm;
    }
  }
  HTableDescriptor tdmt = null;
  try {
    tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
  } catch (NullPointerException e) {
    LOG.debug("Exception during readTableDecriptor. Current table name = "
        + tablename, e);
  } catch (IOException ioe) {
    LOG.debug("Exception during readTableDecriptor. Current table name = "
        + tablename, ioe);
  }
  // last HTD written wins
  if (usecache && tdmt != null) {
    this.cache.put(tablename, tdmt);
  }

  return tdmt;
}
Project: ditb    File: ZKConfig.java
/**
 * Separate the given key into the three configurations it should contain:
 * hbase.zookeeper.quorum, hbase.zookeeper.client.port
 * and zookeeper.znode.parent
 * @param key the cluster key to parse
 * @return the three configurations in the described order
 * @throws IOException if the key does not match a supported format
 */
public static ZKClusterKey transformClusterKey(String key) throws IOException {
  String[] parts = key.split(":");

  if (parts.length == 3) {
    return new ZKClusterKey(parts[0], Integer.parseInt(parts[1]), parts[2]);
  }

  if (parts.length > 3) {
    // The quorum could contain client port in server:clientport format, try to transform more.
    String zNodeParent = parts[parts.length - 1];
    String clientPort = parts[parts.length - 2];

    // The first part length is the total length minus the lengths of other parts and minus 2 ":"
    int endQuorumIndex = key.length() - zNodeParent.length() - clientPort.length() - 2;
    String quorumStringInput = key.substring(0, endQuorumIndex);
    String[] serverHosts = quorumStringInput.split(",");

    // The common case is that every server has its own client port specified - this means
    // that (total parts - the ZNodeParent part - the ClientPort part) is equal to
    // (the number of "," + 1) - "+ 1" because the last server has no ",".
    if ((parts.length - 2) == (serverHosts.length + 1)) {
      return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent);
    }

    // For the uncommon case that some servers has no port specified, we need to build the
    // server:clientport list using default client port for servers without specified port.
    return new ZKClusterKey(
        buildZKQuorumServerString(serverHosts, clientPort),
        Integer.parseInt(clientPort),
        zNodeParent);
  }

  throw new IOException("Cluster key passed " + key + " is invalid, the format should be:" +
      HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":"
      + HConstants.ZOOKEEPER_ZNODE_PARENT);
}
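Two illustrative keys accepted by this method (hosts and znode are made up; both calls throw IOException on malformed input):

// Common form: quorum, shared client port, znode parent.
ZKConfig.ZKClusterKey k1 = ZKConfig.transformClusterKey("zk1,zk2,zk3:2181:/hbase");
// Per-server ports: the quorum keeps host:port pairs and the shared client port follows;
// the parsed quorum here is "zk1:2181,zk2:2182,zk3:2183".
ZKConfig.ZKClusterKey k2 =
    ZKConfig.transformClusterKey("zk1:2181,zk2:2182,zk3:2183:2181:/hbase");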
Project: ditb    File: RequestConverter.java
/**
 * Create a protocol buffer MutateRequest for an append
 *
 * @param regionName the name of the region the append targets
 * @param append the append operation to wrap
 * @param nonceGroup nonce group of the operation, or HConstants.NO_NONCE
 * @param nonce nonce of the operation, or HConstants.NO_NONCE
 * @return a mutate request
 * @throws IOException
 */
public static MutateRequest buildMutateRequest(final byte[] regionName,
    final Append append, long nonceGroup, long nonce) throws IOException {
  MutateRequest.Builder builder = MutateRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
    RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);
  if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) {
    builder.setNonceGroup(nonceGroup);
  }
  builder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, append,
    MutationProto.newBuilder(), nonce));
  return builder.build();
}
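A usage sketch; the row, family, region name and nonce values are placeholders, and a real caller would take the region name from a RegionLocator and the nonces from the client's NonceGenerator:

Append append = new Append(Bytes.toBytes("row1"));
append.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
// Placeholder region name and nonce pair, for illustration only.
MutateRequest req = RequestConverter.buildMutateRequest(
    Bytes.toBytes("placeholderRegionName"), append, 1L, 42L);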
Project: ditb    File: TestWithDisabledAuthorization.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  // Set up superuser
  SecureTestUtil.configureSuperuser(conf);

  // Install the VisibilityController as a system processor
  VisibilityTestUtil.enableVisiblityLabels(conf);

  // Now, DISABLE active authorization
  conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, false);

  TEST_UTIL.startMiniCluster();

  // Wait for the labels table to become available
  TEST_UTIL.waitUntilAllRegionsAssigned(LABELS_TABLE_NAME);

  // create a set of test users
  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
  USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);

  // Define test labels
  SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        VisibilityClient.addLabels(conn,
          new String[] { SECRET, CONFIDENTIAL, PRIVATE });
        VisibilityClient.setAuths(conn,
          new String[] { SECRET, CONFIDENTIAL },
          USER_RW.getShortName());
      } catch (Throwable t) {
        fail("Should not have failed");          
      }
      return null;
    }
  });
}