Java class org.apache.hadoop.hbase.security.UserProvider example source code
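
The snippets below are taken from open-source projects and show real call sites of org.apache.hadoop.hbase.security.UserProvider. For orientation, here is a minimal sketch of the pattern they all share (method names match the snippets; getCurrent() can throw IOException, so call it from a method that declares it): instantiate a provider from a Configuration, then ask it for the current User.

  Configuration conf = HBaseConfiguration.create();
  UserProvider provider = UserProvider.instantiate(conf);
  User user = provider.getCurrent();        // wraps the current Hadoop UserGroupInformation
  String name = user.getShortName();        // used below in permission caches and connection keys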

Project: ditb    File: TestThriftServer.java
/**
 * Check that checkAndPut fails if the cell does not exist, then put in the cell, then check that
 * the checkAndPut succeeds.
 */
public static void doTestCheckAndPut() throws Exception {
  ThriftServerRunner.HBaseHandler handler =
    new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()));
  handler.createTable(tableAname, getColumnDescriptors());
  try {
    List<Mutation> mutations = new ArrayList<Mutation>(1);
    mutations.add(new Mutation(false, columnAname, valueAname, true));
    Mutation putB = new Mutation(false, columnBname, valueBname, true);

    assertFalse(handler.checkAndPut(tableAname, rowAname, columnAname, valueAname, putB, null));

    handler.mutateRow(tableAname, rowAname, mutations, null);

    assertTrue(handler.checkAndPut(tableAname, rowAname, columnAname, valueAname, putB, null));

    TRowResult rowResult = handler.getRow(tableAname, rowAname, null).get(0);
    assertEquals(rowAname, rowResult.row);
    assertEquals(valueBname, rowResult.columns.get(columnBname).value);
  } finally {
    handler.disableTable(tableAname);
    handler.deleteTable(tableAname);
  }
}
Project: ditb    File: SecureBulkLoadEndpoint.java
@Override
public void start(CoprocessorEnvironment env) {
  this.env = (RegionCoprocessorEnvironment)env;
  random = new SecureRandom();
  conf = env.getConfiguration();
  baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
  this.userProvider = UserProvider.instantiate(conf);

  try {
    fs = FileSystem.get(conf);
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
    fs.setPermission(baseStagingDir, PERM_HIDDEN);
    //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
    fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
    FileStatus status = fs.getFileStatus(baseStagingDir);
    if(status == null) {
      throw new IllegalStateException("Failed to create staging directory");
    }
    if(!status.getPermission().equals(PERM_HIDDEN)) {
      throw new IllegalStateException(
          "Directory already exists but permissions aren't set to '-rwx--x--x' ");
    }
  } catch (IOException e) {
    throw new IllegalStateException("Failed to get FileSystem instance",e);
  }
}
Project: ditb    File: TableAuthManager.java
/**
 * Returns a new {@code PermissionCache} initialized with permission assignments
 * from the {@code hbase.superuser} configuration key.
 */
private PermissionCache<Permission> initGlobal(Configuration conf) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(conf);
  User user = userProvider.getCurrent();
  if (user == null) {
    throw new IOException("Unable to obtain the current user, " +
        "authorization checks for internal operations will not work correctly!");
  }
  PermissionCache<Permission> newCache = new PermissionCache<Permission>();
  String currentUser = user.getShortName();

  // the system user is always included
  List<String> superusers = Lists.asList(currentUser, conf.getStrings(
      Superusers.SUPERUSER_CONF_KEY, new String[0]));
  if (superusers != null) {
    for (String name : superusers) {
      if (AuthUtil.isGroupPrincipal(name)) {
        newCache.putGroup(AuthUtil.getGroupName(name),
            new Permission(Permission.Action.values()));
      } else {
        newCache.putUser(name, new Permission(Permission.Action.values()));
      }
    }
  }
  return newCache;
}
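
The AuthUtil.isGroupPrincipal(name) branch above implies that entries under hbase.superuser may be either user names or group names, with groups carrying an '@' prefix. A hypothetical configuration exercising both branches (the principal names are illustrative):

  Configuration conf = HBaseConfiguration.create();
  // one user principal plus one group principal (note the '@' prefix)
  conf.setStrings(Superusers.SUPERUSER_CONF_KEY, "admin", "@hbase_admins");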
Project: ditb    File: TableMapReduceUtil.java
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
Project: ditb    File: TableMapReduceUtil.java
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * @param job The job that requires the permission.
 * @param conf The configuration to use in connecting to the peer cluster
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, Configuration conf)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Connection peerConn = ConnectionFactory.createConnection(conf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt(); // restore the interrupt status
    }
  }
}
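
A hedged driver-side sketch of how the two helpers combine in a job that reads from the local cluster and writes to a peer; it assumes peerConf has already been pointed at the peer's ZooKeeper ensemble, and the job name is illustrative:

  Job job = Job.getInstance(HBaseConfiguration.create(), "cross-cluster-example");
  TableMapReduceUtil.initCredentials(job);                      // token for the local cluster
  Configuration peerConf = HBaseConfiguration.create();
  // ... configure peerConf for the peer cluster's ZooKeeper quorum ...
  TableMapReduceUtil.initCredentialsForCluster(job, peerConf);  // token for the peer cluster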
Project: ditb    File: HBaseFsck.java
private void preCheckPermission() throws IOException, AccessDeniedException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }

  Path hbaseDir = FSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      FSUtils.checkAccess(ugi, file, FsAction.WRITE);
    } catch (AccessDeniedException ace) {
      LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
        + " does not have write perms to " + file.getPath()
        + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw ace;
    }
  }
}
Project: ditb    File: TestSecureLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());
  util.getConfiguration().setInt(
      LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
      MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
      KeyValueCodecWithTags.class.getCanonicalName());

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);

  setupNamespace();
}
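
UserProvider.setUserProviderForTesting stores the given provider class in the configuration, so the test cluster behaves as if security were enabled without a real KDC. A hypothetical stand-in of the same shape as HadoopSecurityEnabledUserProviderForTesting (its actual implementation is assumed here):

  public class AlwaysSecureUserProvider extends UserProvider {
    @Override
    public boolean isHadoopSecurityEnabled() {
      return true;  // pretend Kerberos is configured
    }
    @Override
    public boolean isHBaseSecurityEnabled() {
      return true;  // pretend HBase authentication is on
    }
  }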
Project: ditb    File: TestSecureExportSnapshot.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setUpBaseConf(TEST_UTIL.getConfiguration());

  // set the always on security provider
  UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);

  // setup configuration
  SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());

  TEST_UTIL.startMiniCluster(3);
  TEST_UTIL.startMiniMapReduceCluster();

  // Wait for the ACL table to become available
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
}
Project: ditb    File: TestConnectionCache.java
/**
 * Test that ConnectionCache cleans up expired HConnection instances.
 */
@Test
public void testConnectionChore() throws Exception {
  UTIL.startMiniCluster();

  //1s for clean interval & 5s for maxIdleTime
  ConnectionCache cache = new ConnectionCache(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()), 1000, 5000);
  ConnectionCache.ConnectionInfo info = cache.getCurrentConnection();

  assertEquals(false, info.connection.isClosed());

  Thread.sleep(7000);

  assertEquals(true, info.connection.isClosed());
  UTIL.shutdownMiniCluster();
}
Project: ditb    File: HConnectionKey.java
HConnectionKey(Configuration conf) {
  Map<String, String> m = new HashMap<String, String>();
  if (conf != null) {
    for (String property : CONNECTION_PROPERTIES) {
      String value = conf.get(property);
      if (value != null) {
        m.put(property, value);
      }
    }
  }
  this.properties = Collections.unmodifiableMap(m);

  try {
    UserProvider provider = UserProvider.instantiate(conf);
    User currentUser = provider.getCurrent();
    if (currentUser != null) {
      username = currentUser.getName();
    }
  } catch (IOException ioe) {
    ConnectionManager.LOG.warn(
        "Error obtaining current user, skipping username in HConnectionKey", ioe);
  }
}
Project: ditb    File: TestGetAndPutResource.java
@Test
public void testMetrics() throws IOException, JAXBException {
  final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
  Response response = client.put(path, Constants.MIMETYPE_BINARY,
      Bytes.toBytes(VALUE_4));
  assertEquals(response.getCode(), 200);
  Thread.yield();
  response = client.get(path, Constants.MIMETYPE_JSON);
  assertEquals(response.getCode(), 200);
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  response = deleteRow(TABLE, ROW_4);
  assertEquals(response.getCode(), 200);

  UserProvider userProvider = UserProvider.instantiate(conf);
  METRICS_ASSERT.assertCounterGt("requests", 2L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());

  METRICS_ASSERT.assertCounterGt("successfulGet", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());

  METRICS_ASSERT.assertCounterGt("successfulPut", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());

  METRICS_ASSERT.assertCounterGt("successfulDelete", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
}
Project: storm-hbase-1.0.x    File: HBaseSecurityUtil.java
public static UserProvider login(Map conf, Configuration hbaseConfig) throws IOException {
    UserProvider provider = UserProvider.instantiate(hbaseConfig);
    if (UserGroupInformation.isSecurityEnabled()) {
        String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY);
        if (keytab != null) {
            hbaseConfig.set(STORM_KEYTAB_FILE_KEY, keytab);
        }
        String userName = (String) conf.get(STORM_USER_NAME_KEY);
        if (userName != null) {
            hbaseConfig.set(STORM_USER_NAME_KEY, userName);
        }
        provider.login(STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY, 
            InetAddress.getLocalHost().getCanonicalHostName());
    }
    return provider;
}
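
A usage sketch for the Storm integration above, e.g. from a bolt's prepare(); the keytab path and principal are placeholders:

  Map<String, Object> stormConf = new HashMap<>();
  stormConf.put(HBaseSecurityUtil.STORM_KEYTAB_FILE_KEY, "/path/to/storm.keytab");
  stormConf.put(HBaseSecurityUtil.STORM_USER_NAME_KEY, "storm@EXAMPLE.COM");
  UserProvider provider = HBaseSecurityUtil.login(stormConf, HBaseConfiguration.create());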
Project: pbase    File: SecureBulkLoadEndpoint.java
@Override
public void start(CoprocessorEnvironment env) {
  this.env = (RegionCoprocessorEnvironment)env;
  random = new SecureRandom();
  conf = env.getConfiguration();
  baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
  this.userProvider = UserProvider.instantiate(conf);

  try {
    fs = FileSystem.get(conf);
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
    fs.setPermission(baseStagingDir, PERM_HIDDEN);
    //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
    fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
    FileStatus status = fs.getFileStatus(baseStagingDir);
    if(status == null) {
      throw new IllegalStateException("Failed to create staging directory");
    }
    if(!status.getPermission().equals(PERM_HIDDEN)) {
      throw new IllegalStateException(
          "Directory already exists but permissions aren't set to '-rwx--x--x' ");
    }
  } catch (IOException e) {
    throw new IllegalStateException("Failed to get FileSystem instance",e);
  }
}
Project: pbase    File: TableAuthManager.java
/**
 * Returns a new {@code PermissionCache} initialized with permission assignments
 * from the {@code hbase.superuser} configuration key.
 */
private PermissionCache<Permission> initGlobal(Configuration conf) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(conf);
  User user = userProvider.getCurrent();
  if (user == null) {
    throw new IOException("Unable to obtain the current user, " +
        "authorization checks for internal operations will not work correctly!");
  }
  PermissionCache<Permission> newCache = new PermissionCache<Permission>();
  String currentUser = user.getShortName();

  // the system user is always included
  List<String> superusers = Lists.asList(currentUser, conf.getStrings(
      AccessControlLists.SUPERUSER_CONF_KEY, new String[0]));
  if (superusers != null) {
    for (String name : superusers) {
      if (AccessControlLists.isGroupPrincipal(name)) {
        newCache.putGroup(AccessControlLists.getGroupName(name),
            new Permission(Permission.Action.values()));
      } else {
        newCache.putUser(name, new Permission(Permission.Action.values()));
      }
    }
  }
  return newCache;
}
Project: pbase    File: TableMapReduceUtil.java
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
Project: pbase    File: TableMapReduceUtil.java
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * The quorumAddress is the key to the ZK ensemble, which contains:
 * hbase.zookeeper.quorum, hbase.zookeeper.client.port and zookeeper.znode.parent
 *
 * @param job The job that requires the permission.
 * @param quorumAddress string that contains the three required configurations
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, String quorumAddress)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Configuration peerConf = HBaseConfiguration.create(job.getConfiguration());
      ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress);
      Connection peerConn = ConnectionFactory.createConnection(peerConf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt(); // restore the interrupt status
    }
  }
}
Project: pbase    File: HBaseFsck.java
private void preCheckPermission() throws IOException, AccessDeniedException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }

  Path hbaseDir = FSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      FSUtils.checkAccess(ugi, file, FsAction.WRITE);
    } catch (AccessDeniedException ace) {
      LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
        + " does not have write perms to " + file.getPath()
        + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw ace;
    }
  }
}
Project: pbase    File: TestSecureLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());
  util.getConfiguration().setInt(
      LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
      MAX_FILES_PER_REGION_PER_FAMILY);

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);

  setupNamespace();
}
Project: pbase    File: TestSecureExportSnapshot.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setUpBaseConf(TEST_UTIL.getConfiguration());

  // set the always on security provider
  UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);

  // setup configuration
  SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());

  TEST_UTIL.startMiniCluster(3);
  TEST_UTIL.startMiniMapReduceCluster();

  // Wait for the ACL table to become available
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
}
Project: pbase    File: HConnectionKey.java
HConnectionKey(Configuration conf) {
  Map<String, String> m = new HashMap<String, String>();
  if (conf != null) {
    for (String property : CONNECTION_PROPERTIES) {
      String value = conf.get(property);
      if (value != null) {
        m.put(property, value);
      }
    }
  }
  this.properties = Collections.unmodifiableMap(m);

  try {
    UserProvider provider = UserProvider.instantiate(conf);
    User currentUser = provider.getCurrent();
    if (currentUser != null) {
      username = currentUser.getName();
    }
  } catch (IOException ioe) {
    ConnectionManager.LOG.warn(
        "Error obtaining current user, skipping username in HConnectionKey", ioe);
  }
}
Project: hbase-broker    File: HbaseConfiguration.java
private Admin getSecuredHBaseClient() throws InterruptedException, URISyntaxException,
    LoginException, IOException {
  LOGGER.info("Trying kerberos authentication");
  KrbLoginManager loginManager =
      KrbLoginManagerFactory.getInstance().getKrbLoginManagerInstance(
          kerberosProperties.getKdc(), kerberosProperties.getRealm());

  Subject subject =
      loginManager.loginWithCredentials(configuration.getUser(), configuration.getPassword()
          .toCharArray());
  loginManager.loginInHadoop(subject, hbaseConf);
  Configuration conf = HBaseConfiguration.create(hbaseConf);
  User user =
      UserProvider.instantiate(conf).create(UserGroupInformation.getUGIFromSubject(subject));
  Connection connection = ConnectionFactory.createConnection(conf, user);
  return connection.getAdmin();
}
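
The create(ugi) hook used above also works without Kerberos; a short sketch wrapping an arbitrary Hadoop identity into an HBase User (the user name is illustrative):

  Configuration conf = HBaseConfiguration.create();
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("broker-service");
  User user = UserProvider.instantiate(conf).create(ugi);
  Connection connection = ConnectionFactory.createConnection(conf, user);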
Project: HIndex    File: SecureBulkLoadEndpoint.java
@Override
public void start(CoprocessorEnvironment env) {
  this.env = (RegionCoprocessorEnvironment)env;
  random = new SecureRandom();
  conf = env.getConfiguration();
  baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
  this.userProvider = UserProvider.instantiate(conf);

  try {
    fs = FileSystem.get(conf);
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
    fs.setPermission(baseStagingDir, PERM_HIDDEN);
    //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
    fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
    FileStatus status = fs.getFileStatus(baseStagingDir);
    if(status == null) {
      throw new IllegalStateException("Failed to create staging directory");
    }
    if(!status.getPermission().equals(PERM_HIDDEN)) {
      throw new IllegalStateException(
          "Directory already exists but permissions aren't set to '-rwx--x--x' ");
    }
  } catch (IOException e) {
    throw new IllegalStateException("Failed to get FileSystem instance",e);
  }
}
Project: HIndex    File: TableAuthManager.java
/**
 * Returns a new {@code PermissionCache} initialized with permission assignments
 * from the {@code hbase.superuser} configuration key.
 */
private PermissionCache<Permission> initGlobal(Configuration conf) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(conf);
  User user = userProvider.getCurrent();
  if (user == null) {
    throw new IOException("Unable to obtain the current user, " +
        "authorization checks for internal operations will not work correctly!");
  }
  PermissionCache<Permission> newCache = new PermissionCache<Permission>();
  String currentUser = user.getShortName();

  // the system user is always included
  List<String> superusers = Lists.asList(currentUser, conf.getStrings(
      AccessControlLists.SUPERUSER_CONF_KEY, new String[0]));
  if (superusers != null) {
    for (String name : superusers) {
      if (AccessControlLists.isGroupPrincipal(name)) {
        newCache.putGroup(AccessControlLists.getGroupName(name),
            new Permission(Permission.Action.values()));
      } else {
        newCache.putUser(name, new Permission(Permission.Action.values()));
      }
    }
  }
  return newCache;
}
Project: HIndex    File: TableMapReduceUtil.java
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      Token<AuthenticationTokenIdentifier> authToken = getAuthToken(job, user);
      if (authToken == null) {
        user.obtainAuthTokenForJob(job);
      } else {
        job.getCredentials().addToken(authToken.getService(), authToken);
      }
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    }
  }
}
Project: HIndex    File: RESTServlet.java
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @param realUser the login user
 */
RESTServlet(final Configuration conf,
    final UserGroupInformation realUser) {
  this.userProvider = UserProvider.instantiate(conf);
  stoppable = new Stoppable() {
    private volatile boolean isStopped = false;
    @Override public void stop(String why) { isStopped = true;}
    @Override public boolean isStopped() {return isStopped;}
  };

  int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
  int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
  connectionCleaner = new ConnectionCleaner(cleanInterval, maxIdleTime);
  Threads.setDaemonThreadRunning(connectionCleaner.getThread());

  this.realUser = realUser;
  this.conf = conf;
}
Project: HIndex    File: HBaseFsck.java
private void preCheckPermission() throws IOException, AccessControlException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }

  Path hbaseDir = FSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      FSUtils.checkAccess(ugi, file, FsAction.WRITE);
    } catch (AccessControlException ace) {
      LOG.warn("Got AccessControlException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
        + " does not have write perms to " + file.getPath()
        + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw new AccessControlException(ace);
    }
  }
}
Project: HIndex    File: TestSecureLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());
  util.getConfiguration().setInt(
      LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
      MAX_FILES_PER_REGION_PER_FAMILY);

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName());
}
Project: HIndex    File: TestSecureExportSnapshot.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setUpBaseConf(TEST_UTIL.getConfiguration());

  // set the always on security provider
  UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);

  // setup configuration
  SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());

  TEST_UTIL.startMiniCluster(3);
  TEST_UTIL.startMiniMapReduceCluster();

  // Wait for the ACL table to become available
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName());
}
Project: HIndex    File: HConnectionKey.java
HConnectionKey(Configuration conf) {
  Map<String, String> m = new HashMap<String, String>();
  if (conf != null) {
    for (String property : CONNECTION_PROPERTIES) {
      String value = conf.get(property);
      if (value != null) {
        m.put(property, value);
      }
    }
  }
  this.properties = Collections.unmodifiableMap(m);

  try {
    UserProvider provider = UserProvider.instantiate(conf);
    User currentUser = provider.getCurrent();
    if (currentUser != null) {
      username = currentUser.getName();
    }
  } catch (IOException ioe) {
    HConnectionManager.LOG.warn(
        "Error obtaining current user, skipping username in HConnectionKey", ioe);
  }
}
Project: tajo    File: HBaseTablespace.java
HConnectionKey(Configuration conf) {
  Map<String, String> m = new HashMap<>();
  if (conf != null) {
    for (String property : CONNECTION_PROPERTIES) {
      String value = conf.get(property);
      if (value != null) {
        m.put(property, value);
      }
    }
  }
  this.properties = Collections.unmodifiableMap(m);

  try {
    UserProvider provider = UserProvider.instantiate(conf);
    User currentUser = provider.getCurrent();
    if (currentUser != null) {
      username = currentUser.getName();
    }
  } catch (IOException ioe) {
    LOG.warn("Error obtaining current user, skipping username in HConnectionKey", ioe);
  }
}
Project: hbase    File: TestThriftServer.java
/**
 * Check that checkAndPut fails if the cell does not exist, then put in the cell, then check that
 * the checkAndPut succeeds.
 */
public static void doTestCheckAndPut() throws Exception {
  ThriftServerRunner.HBaseHandler handler =
    new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()));
  handler.createTable(tableAname, getColumnDescriptors());
  try {
    List<Mutation> mutations = new ArrayList<>(1);
    mutations.add(new Mutation(false, columnAname, valueAname, true));
    Mutation putB = new Mutation(false, columnBname, valueBname, true);

    assertFalse(handler.checkAndPut(tableAname, rowAname, columnAname, valueAname, putB, null));

    handler.mutateRow(tableAname, rowAname, mutations, null);

    assertTrue(handler.checkAndPut(tableAname, rowAname, columnAname, valueAname, putB, null));

    TRowResult rowResult = handler.getRow(tableAname, rowAname, null).get(0);
    assertEquals(rowAname, rowResult.row);
    assertEquals(valueBname, rowResult.columns.get(columnBname).value);
  } finally {
    handler.disableTable(tableAname);
    handler.deleteTable(tableAname);
  }
}
Project: hbase    File: TestSecureExport.java
/**
 * Sets up security first so that the correct default realm is used.
 */
@BeforeClass
public static void beforeClass() throws Exception {
  UserProvider.setUserProviderForTesting(UTIL.getConfiguration(),
      HadoopSecurityEnabledUserProviderForTesting.class);
  setUpKdcServer();
  SecureTestUtil.enableSecurity(UTIL.getConfiguration());
  UTIL.getConfiguration().setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
  VisibilityTestUtil.enableVisiblityLabels(UTIL.getConfiguration());
  SecureTestUtil.verifyConfiguration(UTIL.getConfiguration());
  setUpClusterKdc();
  UTIL.startMiniCluster();
  UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME);
  UTIL.waitUntilAllRegionsAssigned(VisibilityConstants.LABELS_TABLE_NAME);
  UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME, 50000);
  UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME, 50000);
  SecureTestUtil.grantGlobal(UTIL, USER_ADMIN,
          Permission.Action.ADMIN,
          Permission.Action.CREATE,
          Permission.Action.EXEC,
          Permission.Action.READ,
          Permission.Action.WRITE);
  addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER),
          Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET));
}
Project: hbase    File: TableAuthManager.java
/**
 * Returns a new {@code PermissionCache} initialized with permission assignments
 * from the {@code hbase.superuser} configuration key.
 */
private PermissionCache<Permission> initGlobal(Configuration conf) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(conf);
  User user = userProvider.getCurrent();
  if (user == null) {
    throw new IOException("Unable to obtain the current user, " +
        "authorization checks for internal operations will not work correctly!");
  }
  PermissionCache<Permission> newCache = new PermissionCache<>();
  String currentUser = user.getShortName();

  // the system user is always included
  List<String> superusers = Lists.asList(currentUser, conf.getStrings(
      Superusers.SUPERUSER_CONF_KEY, new String[0]));
  if (superusers != null) {
    for (String name : superusers) {
      if (AuthUtil.isGroupPrincipal(name)) {
        newCache.putGroup(AuthUtil.getGroupName(name),
            new Permission(Permission.Action.values()));
      } else {
        newCache.putUser(name, new Permission(Permission.Action.values()));
      }
    }
  }
  return newCache;
}
Project: hbase    File: HBaseFsck.java
private void preCheckPermission() throws IOException, AccessDeniedException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }

  Path hbaseDir = FSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      FSUtils.checkAccess(ugi, file, FsAction.WRITE);
    } catch (AccessDeniedException ace) {
      LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
        + " does not have write perms to " + file.getPath()
        + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw ace;
    }
  }
}
Project: hbase    File: TestSecureLoadIncrementalHFiles.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());
  util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
    MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
    KeyValueCodecWithTags.class.getCanonicalName());

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);

  setupNamespace();
}
Project: hbase    File: TestConnectionCache.java
/**
 * Test that ConnectionCache cleans up expired Connection instances.
 */
@Test
public void testConnectionChore() throws Exception {
  UTIL.startMiniCluster();

  //1s for clean interval & 5s for maxIdleTime
  ConnectionCache cache = new ConnectionCache(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()), 1000, 5000);
  ConnectionCache.ConnectionInfo info = cache.getCurrentConnection();

  assertEquals(false, info.connection.isClosed());

  Thread.sleep(7000);

  assertEquals(true, info.connection.isClosed());
  UTIL.shutdownMiniCluster();
}
Project: hbase    File: RSGroupAdminEndpoint.java
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  if (!(env instanceof HasMasterServices)) {
    throw new IOException("Does not implement HasMasterServices");
  }

  master = ((HasMasterServices)env).getMasterServices();
  groupInfoManager = RSGroupInfoManagerImpl.getInstance(master);
  groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
  Class<?> clazz =
      master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
  if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
    throw new IOException("Configured balancer does not support RegionServer groups.");
  }
  ZKWatcher zk = ((HasMasterServices)env).getMasterServices().getZooKeeper();
  accessChecker = new AccessChecker(env.getConfiguration(), zk);

  // set the user-provider.
  this.userProvider = UserProvider.instantiate(env.getConfiguration());
}
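
The balancer check above means the endpoint only starts when the master's balancer implements RSGroupableBalancer; in HBase's rsgroup module that is typically RSGroupBasedLoadBalancer (the class name is assumed from that module):

  conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      "org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer");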
Project: hbase    File: TableMapReduceUtil.java
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
Project: hbase    File: TableMapReduceUtil.java
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * @param job The job that requires the permission.
 * @param conf The configuration to use in connecting to the peer cluster
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, Configuration conf)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Connection peerConn = ConnectionFactory.createConnection(conf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt(); // restore the interrupt status
    }
  }
}
Project: hbase    File: TestMobSecureExportSnapshot.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setUpBaseConf(TEST_UTIL.getConfiguration());
  // Setup separate test-data directory for MR cluster and set corresponding configurations.
  // Otherwise, different test classes running MR cluster can step on each other.
  TEST_UTIL.getDataTestDir();

  // set the always on security provider
  UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);

  // setup configuration
  SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());

  TEST_UTIL.startMiniCluster(1, 3);
  TEST_UTIL.startMiniMapReduceCluster();

  // Wait for the ACL table to become available
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
}