Java 类org.apache.hadoop.mapred.JobTracker 实例源码

项目:hadoop-2.6.0-cdh5.4.3    文件:MRAdmin.java   
/**
 * Refresh the service-level authorization policy on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshAuthorizationPolicy() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's, so copy it into the service-user key.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy for the refresh protocol
  RefreshAuthorizationPolicyProtocol refreshProtocol = 
    (RefreshAuthorizationPolicyProtocol) 
    RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
                 RefreshAuthorizationPolicyProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           RefreshAuthorizationPolicyProtocol.class));

  try {
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:MRAdmin.java   
/**
 * Refresh the queue configuration in effect on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshQueues() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Refresh the queue properties
    adminOperationsProtocol.refreshQueues();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:MRAdmin.java   
/**
 * Command to ask the jobtracker to reread the hosts and excluded hosts 
 * file.
 * Usage: java MRAdmin -refreshNodes
 * @return 0 on success
 * @exception IOException if the RPC to the JobTracker fails
 */
private int refreshNodes() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Re-read the hosts/exclude node lists (the original comment said
    // "queue properties", which was a copy/paste error)
    adminOperationsProtocol.refreshNodes();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:MRAdmin.java   
/**
 * refreshSuperUserGroupsConfiguration {@link JobTracker}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException if the RPC to the JobTracker fails
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's, so copy it into the service-user key.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy for the user-mappings refresh protocol
  RefreshUserMappingsProtocol refreshProtocol = 
    (RefreshUserMappingsProtocol) 
    RPC.getProxy(RefreshUserMappingsProtocol.class, 
                 RefreshUserMappingsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                     RefreshUserMappingsProtocol.class));

  try {
    // Refresh the user-to-groups mappings
    refreshProtocol.refreshSuperUserGroupsConfiguration();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TokenCache.java   
/**
 * Obtain delegation tokens for a specific FileSystem and add them to the
 * supplied credentials. (The original Javadoc listed a parameter {@code p}
 * that does not exist in the signature; it has been removed.)
 * @param fs file system to request delegation tokens from
 * @param credentials credential store that receives the new tokens
 * @param conf configuration used to look up the JobTracker principal
 * @throws IOException if token retrieval fails
 */
private static void obtainTokensForNamenodesInternal(FileSystem fs,
    Credentials credentials, Configuration conf) throws IOException {
  // Renewer name passed to addDelegationTokens is the JobTracker
  // principal's short name, derived from its configured Kerberos name.
  HadoopKerberosName jtKrbName = new HadoopKerberosName(conf.get(JobTracker.JT_USER_NAME, ""));
  String delegTokenRenewer = jtKrbName.getShortName();

  // Merge in any externally supplied (binary) tokens first.
  mergeBinaryTokens(credentials, conf);

  final Token<?> tokens[] = fs.addDelegationTokens(delegTokenRenewer,
                                                   credentials);
  // addDelegationTokens may return null; log each token actually obtained.
  if (tokens != null) {
    for (Token<?> token : tokens) {
      LOG.info("Got dt for " + fs.getUri() + "; "+token);
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:JobTrackerJspHelper.java   
/**
 * Generates an XML-formatted block that summarizes the state of the JobTracker.
 *
 * @param out destination JSP writer
 * @param tracker JobTracker whose cluster status is summarized
 * @throws IOException if writing to {@code out} fails
 */
public void generateSummaryTable(JspWriter out,
                                 JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int maxMapTasks = status.getMaxMapTasks();
  int maxReduceTasks = status.getMaxReduceTasks();
  int numTaskTrackers = status.getTaskTrackers();
  // Average task capacity per node, or "-" when no TaskTrackers have
  // registered (avoids division by zero).
  String tasksPerNodeStr;
  if (numTaskTrackers > 0) {
    double tasksPerNodePct = (double) (maxMapTasks + maxReduceTasks) / (double) numTaskTrackers;
    tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
  } else {
    tasksPerNodeStr = "-";
  }
  // Reuse the locals computed above instead of re-querying the status
  // object for the same values (the original re-invoked the getters).
  out.print("<maps>" + status.getMapTasks() + "</maps>\n" +
          "<reduces>" + status.getReduceTasks() + "</reduces>\n" +
          "<total_submissions>" + tracker.getTotalSubmissions() + "</total_submissions>\n" +
          "<nodes>" + numTaskTrackers + "</nodes>\n" +
          "<map_task_capacity>" + maxMapTasks + "</map_task_capacity>\n" +
          "<reduce_task_capacity>" + maxReduceTasks + "</reduce_task_capacity>\n" +
          "<avg_tasks_per_node>" + tasksPerNodeStr + "</avg_tasks_per_node>\n");
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies the full delegation-token lifecycle (get, renew, cancel)
 * succeeds when the caller is authenticated via Kerberos.
 */
@Test
public void testDelegationToken() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Mark the caller as Kerberos-authenticated and enable security globally.
  ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        // Obtain a token renewable by this user, then exercise renew
        // and cancel against the live JobTracker.
        Token<DelegationTokenIdentifier> token = jt
            .getDelegationToken(new Text(ugi.getShortUserName()));
        jt.renewDelegationToken(token);
        jt.cancelDelegationToken(token);
      } catch (IOException e) {
        // Any IOException here is a test failure; dump it and rethrow.
        e.printStackTrace();
        throw e;
      }
      return null;
    }
  });
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies that a delegation token is NOT issued when the caller is
 * authenticated by token rather than Kerberos while security is enabled.
 */
@Test
public void testGetDelegationTokenWithoutKerberos() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Caller authenticates via TOKEN while the cluster requires kerberos.
  ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  Assert.assertTrue(UserGroupInformation.isSecurityEnabled());
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        Token<DelegationTokenIdentifier> token = jt
            .getDelegationToken(new Text("arenewer"));
        Assert.assertTrue(token != null);
        // Reaching this point means a token was issued: test failure.
        Assert
            .fail("Delegation token should not be issued without Kerberos authentication");
      } catch (IOException e) {
        // success
      }
      return null;
    }
  });
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies that an existing delegation token cannot be renewed when the
 * caller is authenticated by token rather than Kerberos.
 */
@Test
public void testRenewDelegationTokenWithoutKerberos() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Caller authenticates via TOKEN while the cluster requires kerberos.
  ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  Assert.assertTrue(UserGroupInformation.isSecurityEnabled());
  // Create a token out-of-band so renewal itself is the operation under test.
  final Token<DelegationTokenIdentifier> token = generateDelegationToken(
      "owner", ugi.getShortUserName());
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        jt.renewDelegationToken(token);
        // Reaching this point means renewal succeeded: test failure.
        Assert
            .fail("Delegation token should not be renewed without Kerberos authentication");
      } catch (IOException e) {
        // success
      }
      return null;
    }
  });
}
项目:hadoop-EAR    文件:MRAdmin.java   
/**
 * Refresh the service-level authorization policy on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshAuthorizationPolicy() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the refresh protocol
  RefreshAuthorizationPolicyProtocol refreshProtocol = 
    (RefreshAuthorizationPolicyProtocol) 
    RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
                 RefreshAuthorizationPolicyProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           RefreshAuthorizationPolicyProtocol.class));

  try {
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:hadoop-EAR    文件:MRAdmin.java   
/**
 * Refresh the queue ACLs in effect on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshQueueAcls() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Refresh the queue ACLs (the original comment said "queue
    // properties", which was a copy/paste error)
    adminOperationsProtocol.refreshQueueAcls();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:hadoop-EAR    文件:MRAdmin.java   
/**
 * Command to ask the jobtracker to reread the hosts and excluded hosts 
 * file.
 * Usage: java MRAdmin -refreshNodes
 * @return 0 on success
 * @exception IOException if the RPC to the JobTracker fails
 */
private int refreshNodes() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Re-read the hosts/exclude node lists (the original comment said
    // "queue properties", which was a copy/paste error)
    adminOperationsProtocol.refreshNodes();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:hadoop-EAR    文件:JobTrackerJspHelper.java   
/**
 * Generates an XML-formatted block that summarizes the state of the JobTracker.
 *
 * @param out destination JSP writer
 * @param tracker JobTracker whose cluster status is summarized
 * @throws IOException if writing to {@code out} fails
 */
public void generateSummaryTable(JspWriter out,
                                 JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int maxMapTasks = status.getMaxMapTasks();
  int maxReduceTasks = status.getMaxReduceTasks();
  int numTaskTrackers = status.getTaskTrackers();
  // Average task capacity per node, or "-" when no TaskTrackers have
  // registered (avoids division by zero).
  String tasksPerNodeStr;
  if (numTaskTrackers > 0) {
    double tasksPerNodePct = (double) (maxMapTasks + maxReduceTasks) / (double) numTaskTrackers;
    tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
  } else {
    tasksPerNodeStr = "-";
  }
  // Reuse the locals computed above instead of re-querying the status
  // object for the same values (the original re-invoked the getters).
  out.print("<maps>" + status.getMapTasks() + "</maps>\n" +
          "<reduces>" + status.getReduceTasks() + "</reduces>\n" +
          "<total_submissions>" + tracker.getTotalSubmissions() + "</total_submissions>\n" +
          "<nodes>" + numTaskTrackers + "</nodes>\n" +
          "<map_task_capacity>" + maxMapTasks + "</map_task_capacity>\n" +
          "<reduce_task_capacity>" + maxReduceTasks + "</reduce_task_capacity>\n" +
          "<avg_tasks_per_node>" + tasksPerNodeStr + "</avg_tasks_per_node>\n");
}
项目:hadoop-on-lustre    文件:MRAdmin.java   
/**
 * Refresh the service-level authorization policy on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshAuthorizationPolicy() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's, so copy it into the service-user key.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy for the refresh protocol
  RefreshAuthorizationPolicyProtocol refreshProtocol = 
    (RefreshAuthorizationPolicyProtocol) 
    RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
                 RefreshAuthorizationPolicyProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           RefreshAuthorizationPolicyProtocol.class));

  try {
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:hadoop-on-lustre    文件:MRAdmin.java   
/**
 * Refresh the queue configuration in effect on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshQueues() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Refresh the queue properties
    adminOperationsProtocol.refreshQueues();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:hadoop-on-lustre    文件:MRAdmin.java   
/**
 * Command to ask the jobtracker to reread the hosts and excluded hosts 
 * file.
 * Usage: java MRAdmin -refreshNodes
 * @return 0 on success
 * @exception IOException if the RPC to the JobTracker fails
 */
private int refreshNodes() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Re-read the hosts/exclude node lists (the original comment said
    // "queue properties", which was a copy/paste error)
    adminOperationsProtocol.refreshNodes();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:hadoop-on-lustre    文件:MRAdmin.java   
/**
 * refreshSuperUserGroupsConfiguration {@link JobTracker}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException if the RPC to the JobTracker fails
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's, so copy it into the service-user key.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy for the user-mappings refresh protocol
  RefreshUserMappingsProtocol refreshProtocol = 
    (RefreshUserMappingsProtocol) 
    RPC.getProxy(RefreshUserMappingsProtocol.class, 
                 RefreshUserMappingsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                     RefreshUserMappingsProtocol.class));

  try {
    // Refresh the user-to-groups mappings
    refreshProtocol.refreshSuperUserGroupsConfiguration();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:hadoop-on-lustre    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies the full delegation-token lifecycle (get, renew, cancel)
 * succeeds when the caller is authenticated via Kerberos.
 */
@Test
public void testDelegationToken() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Mark the caller as Kerberos-authenticated and enable security globally.
  ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        // Obtain a token renewable by this user, then exercise renew
        // and cancel against the live JobTracker.
        Token<DelegationTokenIdentifier> token = jt
            .getDelegationToken(new Text(ugi.getShortUserName()));
        jt.renewDelegationToken(token);
        jt.cancelDelegationToken(token);
      } catch (IOException e) {
        // Any IOException here is a test failure; dump it and rethrow.
        e.printStackTrace();
        throw e;
      }
      return null;
    }
  });
}
项目:hadoop-on-lustre    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies that a delegation token is NOT issued when the caller is
 * authenticated by token rather than Kerberos while security is enabled.
 */
@Test
public void testGetDelegationTokenWithoutKerberos() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Caller authenticates via TOKEN while the cluster requires kerberos.
  ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  Assert.assertTrue(UserGroupInformation.isSecurityEnabled());
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        Token<DelegationTokenIdentifier> token = jt
            .getDelegationToken(new Text("arenewer"));
        Assert.assertTrue(token != null);
        // Reaching this point means a token was issued: test failure.
        Assert
            .fail("Delegation token should not be issued without Kerberos authentication");
      } catch (IOException e) {
        // success
      }
      return null;
    }
  });
}
项目:hadoop-on-lustre    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies that an existing delegation token cannot be renewed when the
 * caller is authenticated by token rather than Kerberos.
 */
@Test
public void testRenewDelegationTokenWithoutKerberos() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Caller authenticates via TOKEN while the cluster requires kerberos.
  ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  Assert.assertTrue(UserGroupInformation.isSecurityEnabled());
  // Create a token out-of-band so renewal itself is the operation under test.
  final Token<DelegationTokenIdentifier> token = generateDelegationToken(
      "owner", ugi.getShortUserName());
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        jt.renewDelegationToken(token);
        // Reaching this point means renewal succeeded: test failure.
        Assert
            .fail("Delegation token should not be renewed without Kerberos authentication");
      } catch (IOException e) {
        // success
      }
      return null;
    }
  });
}
项目:RDFS    文件:MRAdmin.java   
/**
 * Refresh the service-level authorization policy on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshAuthorizationPolicy() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the refresh protocol
  RefreshAuthorizationPolicyProtocol refreshProtocol = 
    (RefreshAuthorizationPolicyProtocol) 
    RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
                 RefreshAuthorizationPolicyProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           RefreshAuthorizationPolicyProtocol.class));

  try {
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:RDFS    文件:MRAdmin.java   
/**
 * Refresh the queue ACLs in effect on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshQueueAcls() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Refresh the queue ACLs (the original comment said "queue
    // properties", which was a copy/paste error)
    adminOperationsProtocol.refreshQueueAcls();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:RDFS    文件:MRAdmin.java   
/**
 * Command to ask the jobtracker to reread the hosts and excluded hosts 
 * file.
 * Usage: java MRAdmin -refreshNodes
 * @return 0 on success
 * @exception IOException if the RPC to the JobTracker fails
 */
private int refreshNodes() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Re-read the hosts/exclude node lists (the original comment said
    // "queue properties", which was a copy/paste error)
    adminOperationsProtocol.refreshNodes();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:RDFS    文件:JobTrackerJspHelper.java   
/**
 * Generates an XML-formatted block that summarizes the state of the JobTracker.
 *
 * @param out destination JSP writer
 * @param tracker JobTracker whose cluster status is summarized
 * @throws IOException if writing to {@code out} fails
 */
public void generateSummaryTable(JspWriter out,
                                 JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int maxMapTasks = status.getMaxMapTasks();
  int maxReduceTasks = status.getMaxReduceTasks();
  int numTaskTrackers = status.getTaskTrackers();
  // Average task capacity per node, or "-" when no TaskTrackers have
  // registered (avoids division by zero).
  String tasksPerNodeStr;
  if (numTaskTrackers > 0) {
    double tasksPerNodePct = (double) (maxMapTasks + maxReduceTasks) / (double) numTaskTrackers;
    tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
  } else {
    tasksPerNodeStr = "-";
  }
  // Reuse the locals computed above instead of re-querying the status
  // object for the same values (the original re-invoked the getters).
  out.print("<maps>" + status.getMapTasks() + "</maps>\n" +
          "<reduces>" + status.getReduceTasks() + "</reduces>\n" +
          "<total_submissions>" + tracker.getTotalSubmissions() + "</total_submissions>\n" +
          "<nodes>" + numTaskTrackers + "</nodes>\n" +
          "<map_task_capacity>" + maxMapTasks + "</map_task_capacity>\n" +
          "<reduce_task_capacity>" + maxReduceTasks + "</reduce_task_capacity>\n" +
          "<avg_tasks_per_node>" + tasksPerNodeStr + "</avg_tasks_per_node>\n");
}
项目:hadoop-0.20    文件:MRAdmin.java   
/**
 * Refresh the service-level authorization policy on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshAuthorizationPolicy() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the refresh protocol
  RefreshAuthorizationPolicyProtocol refreshProtocol = 
    (RefreshAuthorizationPolicyProtocol) 
    RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
                 RefreshAuthorizationPolicyProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           RefreshAuthorizationPolicyProtocol.class));

  try {
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:MRAdmin.java   
/**
 * Refresh the service-level authorization policy on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshAuthorizationPolicy() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's, so copy it into the service-user key.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy for the refresh protocol
  RefreshAuthorizationPolicyProtocol refreshProtocol = 
    (RefreshAuthorizationPolicyProtocol) 
    RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
                 RefreshAuthorizationPolicyProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           RefreshAuthorizationPolicyProtocol.class));

  try {
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:MRAdmin.java   
/**
 * Refresh the queue configuration in effect on the JobTracker.
 *
 * @return 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshQueues() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Refresh the queue properties
    adminOperationsProtocol.refreshQueues();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:MRAdmin.java   
/**
 * Command to ask the jobtracker to reread the hosts and excluded hosts 
 * file.
 * Usage: java MRAdmin -refreshNodes
 * @return 0 on success
 * @exception IOException if the RPC to the JobTracker fails
 */
private int refreshNodes() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy for the admin protocol
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Re-read the hosts/exclude node lists (the original comment said
    // "queue properties", which was a copy/paste error)
    adminOperationsProtocol.refreshNodes();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:MRAdmin.java   
/**
 * refreshSuperUserGroupsConfiguration {@link JobTracker}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException if the RPC to the JobTracker fails
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's, so copy it into the service-user key.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy for the user-mappings refresh protocol
  RefreshUserMappingsProtocol refreshProtocol = 
    (RefreshUserMappingsProtocol) 
    RPC.getProxy(RefreshUserMappingsProtocol.class, 
                 RefreshUserMappingsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                     RefreshUserMappingsProtocol.class));

  try {
    // Refresh the user-to-groups mappings
    refreshProtocol.refreshSuperUserGroupsConfiguration();
  } finally {
    // Close the RPC connection; the original code leaked the proxy.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TokenCache.java   
/**
 * Obtain delegation tokens for a specific FileSystem and add them to the
 * supplied credentials. (The original Javadoc listed a parameter {@code p}
 * that does not exist in the signature; it has been removed.)
 * @param fs file system to request delegation tokens from
 * @param credentials credential store that receives the new tokens
 * @param conf configuration used to look up the JobTracker principal
 * @throws IOException if token retrieval fails
 */
private static void obtainTokensForNamenodesInternal(FileSystem fs,
    Credentials credentials, Configuration conf) throws IOException {
  // Renewer name passed to addDelegationTokens is the JobTracker
  // principal's short name, derived from its configured Kerberos name.
  HadoopKerberosName jtKrbName = new HadoopKerberosName(conf.get(JobTracker.JT_USER_NAME, ""));
  String delegTokenRenewer = jtKrbName.getShortName();

  // Merge in any externally supplied (binary) tokens first.
  mergeBinaryTokens(credentials, conf);

  final Token<?> tokens[] = fs.addDelegationTokens(delegTokenRenewer,
                                                   credentials);
  // addDelegationTokens may return null; log each token actually obtained.
  if (tokens != null) {
    for (Token<?> token : tokens) {
      LOG.info("Got dt for " + fs.getUri() + "; "+token);
    }
  }
}
项目:hanoi-hadoop-2.0.0-cdh    文件:JobTrackerJspHelper.java   
/**
 * Generates an XML-formatted block that summarizes the state of the JobTracker.
 *
 * @param out destination JSP writer
 * @param tracker JobTracker whose cluster status is summarized
 * @throws IOException if writing to {@code out} fails
 */
public void generateSummaryTable(JspWriter out,
                                 JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int maxMapTasks = status.getMaxMapTasks();
  int maxReduceTasks = status.getMaxReduceTasks();
  int numTaskTrackers = status.getTaskTrackers();
  // Average task capacity per node, or "-" when no TaskTrackers have
  // registered (avoids division by zero).
  String tasksPerNodeStr;
  if (numTaskTrackers > 0) {
    double tasksPerNodePct = (double) (maxMapTasks + maxReduceTasks) / (double) numTaskTrackers;
    tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
  } else {
    tasksPerNodeStr = "-";
  }
  // Reuse the locals computed above instead of re-querying the status
  // object for the same values (the original re-invoked the getters).
  out.print("<maps>" + status.getMapTasks() + "</maps>\n" +
          "<reduces>" + status.getReduceTasks() + "</reduces>\n" +
          "<total_submissions>" + tracker.getTotalSubmissions() + "</total_submissions>\n" +
          "<nodes>" + numTaskTrackers + "</nodes>\n" +
          "<map_task_capacity>" + maxMapTasks + "</map_task_capacity>\n" +
          "<reduce_task_capacity>" + maxReduceTasks + "</reduce_task_capacity>\n" +
          "<avg_tasks_per_node>" + tasksPerNodeStr + "</avg_tasks_per_node>\n");
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies the full delegation-token lifecycle (get, renew, cancel)
 * succeeds when the caller is authenticated via Kerberos.
 */
@Test
public void testDelegationToken() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Mark the caller as Kerberos-authenticated and enable security globally.
  ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        // Obtain a token renewable by this user, then exercise renew
        // and cancel against the live JobTracker.
        Token<DelegationTokenIdentifier> token = jt
            .getDelegationToken(new Text(ugi.getShortUserName()));
        jt.renewDelegationToken(token);
        jt.cancelDelegationToken(token);
      } catch (IOException e) {
        // Any IOException here is a test failure; dump it and rethrow.
        e.printStackTrace();
        throw e;
      }
      return null;
    }
  });
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies that a delegation token is NOT issued when the caller is
 * authenticated by token rather than Kerberos while security is enabled.
 */
@Test
public void testGetDelegationTokenWithoutKerberos() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Caller authenticates via TOKEN while the cluster requires kerberos.
  ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  Assert.assertTrue(UserGroupInformation.isSecurityEnabled());
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        Token<DelegationTokenIdentifier> token = jt
            .getDelegationToken(new Text("arenewer"));
        Assert.assertTrue(token != null);
        // Reaching this point means a token was issued: test failure.
        Assert
            .fail("Delegation token should not be issued without Kerberos authentication");
      } catch (IOException e) {
        // success
      }
      return null;
    }
  });
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestDelegationTokenAuthMethod.java   
/**
 * Verifies that an existing delegation token cannot be renewed when the
 * caller is authenticated by token rather than Kerberos.
 */
@Test
public void testRenewDelegationTokenWithoutKerberos() throws Exception {
  final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Caller authenticates via TOKEN while the cluster requires kerberos.
  ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
  config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  // Set configuration again so that job tracker finds security enabled
  UserGroupInformation.setConfiguration(config);
  Assert.assertTrue(UserGroupInformation.isSecurityEnabled());
  // Create a token out-of-band so renewal itself is the operation under test.
  final Token<DelegationTokenIdentifier> token = generateDelegationToken(
      "owner", ugi.getShortUserName());
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      try {
        jt.renewDelegationToken(token);
        // Reaching this point means renewal succeeded: test failure.
        Assert
            .fail("Delegation token should not be renewed without Kerberos authentication");
      } catch (IOException e) {
        // success
      }
      return null;
    }
  });
}
项目:mapreduce-fork    文件:MRAdmin.java   
/**
 * Refresh the service-level authorization policy in effect on the
 * {@link JobTracker}.
 *
 * @return exit code 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshAuthorizationPolicy() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy to the JobTracker
  RefreshAuthorizationPolicyProtocol refreshProtocol = 
    (RefreshAuthorizationPolicyProtocol) 
    RPC.getProxy(RefreshAuthorizationPolicyProtocol.class, 
                 RefreshAuthorizationPolicyProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           RefreshAuthorizationPolicyProtocol.class));

  try {
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
  } finally {
    // Release the RPC connection; without this the proxy is leaked.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:mapreduce-fork    文件:MRAdmin.java   
/**
 * Refresh the superuser proxy-groups configuration on the {@link JobTracker}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException if the RPC to the JobTracker fails
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy to the JobTracker
  RefreshUserMappingsProtocol refreshProtocol = 
    (RefreshUserMappingsProtocol) 
    RPC.getProxy(RefreshUserMappingsProtocol.class, 
        RefreshUserMappingsProtocol.versionID, 
        JobTracker.getAddress(conf), getUGI(conf), conf,
        NetUtils.getSocketFactory(conf, 
            RefreshUserMappingsProtocol.class));

  try {
    // Refresh the superuser proxy-groups configuration
    refreshProtocol.refreshSuperUserGroupsConfiguration();
  } finally {
    // Release the RPC connection; without this the proxy is leaked.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:mapreduce-fork    文件:MRAdmin.java   
/**
 * Refresh the user-to-groups mappings on the {@link JobTracker}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization the server principal for this call
  // should be the JobTracker's.
  JobConf jConf = new JobConf(conf);
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      jConf.get(JobTracker.JT_USER_NAME, ""));

  // Create the client-side proxy to the JobTracker
  RefreshUserMappingsProtocol refreshProtocol = 
    (RefreshUserMappingsProtocol) 
    RPC.getProxy(RefreshUserMappingsProtocol.class, 
        RefreshUserMappingsProtocol.versionID, 
        JobTracker.getAddress(conf), getUGI(conf), conf,
        NetUtils.getSocketFactory(conf, 
            RefreshUserMappingsProtocol.class));

  try {
    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
  } finally {
    // Release the RPC connection; without this the proxy is leaked.
    RPC.stopProxy(refreshProtocol);
  }

  return 0;
}
项目:mapreduce-fork    文件:MRAdmin.java   
/**
 * Ask the {@link JobTracker} to reload its queue configuration.
 *
 * @return exit code 0 on success
 * @throws IOException if the RPC to the JobTracker fails
 */
private int refreshQueues() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy to the JobTracker's admin interface
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Refresh the queue properties
    adminOperationsProtocol.refreshQueues();
  } finally {
    // Release the RPC connection; without this the proxy is leaked.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:mapreduce-fork    文件:MRAdmin.java   
/**
 * Command to ask the jobtracker to reread the hosts and excluded hosts 
 * file.
 * Usage: java MRAdmin -refreshNodes
 * @exception IOException 
 */
/**
 * Command to ask the jobtracker to reread the hosts and excluded hosts 
 * file.
 * Usage: java MRAdmin -refreshNodes
 * @return exit code 0 on success
 * @exception IOException if the RPC to the JobTracker fails
 */
private int refreshNodes() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // Create the client-side proxy to the JobTracker's admin interface
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));

  try {
    // Reread the hosts/exclude files (the original comment wrongly said
    // "queue properties" — this call refreshes the node lists).
    adminOperationsProtocol.refreshNodes();
  } finally {
    // Release the RPC connection; without this the proxy is leaked.
    RPC.stopProxy(adminOperationsProtocol);
  }

  return 0;
}
项目:mapreduce-fork    文件:JobTrackerJspHelper.java   
/**
 * Generates an XML-formatted block that summarizes the state of the JobTracker.
 */
public void generateSummaryTable(JspWriter out,
                                 JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int mapSlots = status.getMaxMapTasks();
  int reduceSlots = status.getMaxReduceTasks();
  int trackerCount = status.getTaskTrackers();

  // Average slot count per tracker, or "-" if no trackers have reported in.
  String tasksPerNode = (trackerCount > 0)
      ? percentFormat.format((double) (mapSlots + reduceSlots) / (double) trackerCount)
      : "-";

  StringBuilder xml = new StringBuilder();
  xml.append("<maps>").append(status.getMapTasks()).append("</maps>\n")
     .append("<reduces>").append(status.getReduceTasks()).append("</reduces>\n")
     .append("<total_submissions>").append(tracker.getTotalSubmissions())
     .append("</total_submissions>\n")
     .append("<nodes>").append(trackerCount).append("</nodes>\n")
     .append("<map_task_capacity>").append(mapSlots).append("</map_task_capacity>\n")
     .append("<reduce_task_capacity>").append(reduceSlots)
     .append("</reduce_task_capacity>\n")
     .append("<avg_tasks_per_node>").append(tasksPerNode)
     .append("</avg_tasks_per_node>\n");
  out.print(xml.toString());
}