Java class org.apache.hadoop.fs.CommonConfigurationKeysPublic: example source code

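All of the snippets below follow the same pattern: read or set a key declared in CommonConfigurationKeysPublic, usually pairing a *_KEY constant with its matching *_DEFAULT constant. A minimal self-contained sketch of that pattern (the class name here is made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class IpcRetryConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Read ipc.client.connect.max.retries, falling back to the shipped default.
    int maxRetries = conf.getInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
    System.out.println("ipc.client.connect.max.retries = " + maxRetries);
  }
}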
Project: hadoop-oss    File: HAServiceTarget.java
private HAServiceProtocol getProxyForAddress(Configuration conf,
    int timeoutMs, InetSocketAddress addr) throws IOException {
  Configuration confCopy = new Configuration(conf);
  // Lower the connect retry count so we fail fast when the target is down
  confCopy.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
  return new HAServiceProtocolClientSideTranslatorPB(
      addr,
      confCopy, factory, timeoutMs);
}
Project: hadoop-oss    File: NetUtils.java
/**
 * Get the default socket factory as specified by the configuration
 * parameter <tt>hadoop.rpc.socket.factory.class.default</tt>
 * 
 * @param conf the configuration
 * @return the default socket factory as specified in the configuration or
 *         the JVM default socket factory if the configuration does not
 *         contain a default socket factory property.
 */
public static SocketFactory getDefaultSocketFactory(Configuration conf) {

  String propValue = conf.get(
      CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT);
  if ((propValue == null) || (propValue.length() == 0))
    return SocketFactory.getDefault();

  return getSocketFactoryFromProperty(conf, propValue);
}
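A hedged usage sketch for getDefaultSocketFactory (not part of NetUtils itself): pointing the key at Hadoop's bundled SOCKS factory, assuming org.apache.hadoop.net.SocksSocketFactory is available on the classpath.

Configuration conf = new Configuration();
// Select a non-default factory; getSocketFactoryFromProperty instantiates it.
conf.set(
    CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
    "org.apache.hadoop.net.SocksSocketFactory");
SocketFactory factory = NetUtils.getDefaultSocketFactory(conf);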
Project: hadoop    File: TestDFSClientFailover.java
/**
 * Test that even a non-idempotent method will properly fail over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method and ensure the call fails over successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
Project: hadoop    File: ConfiguredRMFailoverProxyProvider.java
@Override
public void init(Configuration configuration, RMProxy<T> rmProxy,
                  Class<T> protocol) {
  this.rmProxy = rmProxy;
  this.protocol = protocol;
  this.rmProxy.checkAllowedProtocols(this.protocol);
  this.conf = new YarnConfiguration(configuration);
  Collection<String> rmIds = HAUtil.getRMHAIds(conf);
  this.rmServiceIds = rmIds.toArray(new String[rmIds.size()]);
  conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);

  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES));

  conf.setInt(CommonConfigurationKeysPublic.
      IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS,
          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS));
}
Project: hadoop-oss    File: Server.java
ConnectionManager() {
  this.idleScanTimer = new Timer(
      "IPC Server idle connection scanner for port " + getPort(), true);
  this.idleScanThreshold = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_DEFAULT);
  this.idleScanInterval = conf.getInt(
      CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,
      CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT);
  this.maxIdleTime = 2 * conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
  this.maxIdleToClose = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_DEFAULT);
  this.maxConnections = conf.getInt(
      CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_KEY,
      CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_DEFAULT);
  // create a set with concurrency -and- a thread-safe iterator, add 2
  // for listener and idle closer threads
  this.connections = Collections.newSetFromMap(
      new ConcurrentHashMap<Connection,Boolean>(
          maxQueueSize, 0.75f, readThreads+2));
}
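The keys read by this constructor correspond to the ipc.client.idlethreshold, ipc.client.connection.maxidletime, ipc.client.kill.max and ipc.server.max.connections properties. A sketch of tuning them before the server starts (the values are illustrative, not recommendations):

Configuration conf = new Configuration();
// Begin scanning for idle connections once this many clients are connected.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY, 8000);
// Consider a connection idle after 20 seconds without activity.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 20000);
// Close at most 10 idle connections per scan.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY, 10);
// 0 keeps the default behaviour of not capping concurrent connections.
conf.setInt(CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_KEY, 0);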
Project: hadoop    File: RackResolver.java
public synchronized static void init(Configuration conf) {
  if (initCalled) {
    return;
  } else {
    initCalled = true;
  }
  Class<? extends DNSToSwitchMapping> dnsToSwitchMappingClass =
    conf.getClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
      ScriptBasedMapping.class,
      DNSToSwitchMapping.class);
  try {
    DNSToSwitchMapping newInstance = ReflectionUtils.newInstance(
        dnsToSwitchMappingClass, conf);
    // Wrap around the configured class with the Cached implementation so as
    // to save on repetitive lookups.
    // Check if the impl is already caching, to avoid double caching.
    dnsToSwitchMapping =
        ((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance
            : new CachedDNSToSwitchMapping(newInstance));
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Project: hadoop    File: TestRackResolver.java
@Test
public void testCaching() {
  Configuration conf = new Configuration();
  conf.setClass(
    CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
    MyResolver.class, DNSToSwitchMapping.class);
  RackResolver.init(conf);
  try {
    InetAddress iaddr = InetAddress.getByName("host1");
    MyResolver.resolvedHost1 = iaddr.getHostAddress();
  } catch (UnknownHostException e) {
    // Ignore if not found
  }
  Node node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve(invalidHost);
  Assert.assertEquals(NetworkTopology.DEFAULT_RACK, node.getNetworkLocation());
}
Project: hadoop    File: TestContainerManagerSecurity.java
@Parameters
public static Collection<Object[]> configs() {
  Configuration configurationWithoutSecurity = new Configuration();
  configurationWithoutSecurity.set(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");

  Configuration configurationWithSecurity = new Configuration();
  configurationWithSecurity.set(
    CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  configurationWithSecurity.set(
    YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal);
  configurationWithSecurity.set(
    YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
    httpSpnegoKeytabFile.getAbsolutePath());
  configurationWithSecurity.set(
    YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal);
  configurationWithSecurity.set(
    YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
    httpSpnegoKeytabFile.getAbsolutePath());

  return Arrays.asList(new Object[][] { { configurationWithoutSecurity },
      { configurationWithSecurity } });
}
Project: hadoop    File: ResourceLocalizationService.java
Server createServer() {
  Configuration conf = getConfig();
  YarnRPC rpc = YarnRPC.create(conf);
  if (UserGroupInformation.isSecurityEnabled()) {
    secretManager = new LocalizerTokenSecretManager();      
  }

  Server server = rpc.getServer(LocalizationProtocol.class, this,
      localizationServerAddress, conf, secretManager, 
      conf.getInt(YarnConfiguration.NM_LOCALIZER_CLIENT_THREAD_COUNT, 
          YarnConfiguration.DEFAULT_NM_LOCALIZER_CLIENT_THREAD_COUNT));

  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
      false)) {
    server.refreshServiceAcl(conf, new NMPolicyProvider());
  }

  return server;
}
Project: hadoop    File: ApplicationMasterLauncher.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
  int threadCount = conf.getInt(
      YarnConfiguration.RM_AMLAUNCHER_THREAD_COUNT,
      YarnConfiguration.DEFAULT_RM_AMLAUNCHER_THREAD_COUNT);
  ThreadFactory tf = new ThreadFactoryBuilder()
      .setNameFormat("ApplicationMasterLauncher #%d")
      .build();
  launcherPool = new ThreadPoolExecutor(threadCount, threadCount, 1,
      TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
  launcherPool.setThreadFactory(tf);

  Configuration newConf = new YarnConfiguration(conf);
  newConf.setInt(CommonConfigurationKeysPublic.
          IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      conf.getInt(YarnConfiguration.RM_NODEMANAGER_CONNECT_RETIRES,
          YarnConfiguration.DEFAULT_RM_NODEMANAGER_CONNECT_RETIRES));
  setConfig(newConf);
  super.serviceInit(newConf);
}
Project: hadoop    File: AdminService.java
private void refreshAll() throws ServiceFailedException {
  try {
    refreshQueues(RefreshQueuesRequest.newInstance());
    refreshNodes(RefreshNodesRequest.newInstance());
    refreshSuperUserGroupsConfiguration(
        RefreshSuperUserGroupsConfigurationRequest.newInstance());
    refreshUserToGroupsMappings(
        RefreshUserToGroupsMappingsRequest.newInstance());
    if (getConfig().getBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      refreshServiceAcls(RefreshServiceAclsRequest.newInstance());
    }
  } catch (Exception ex) {
    throw new ServiceFailedException(ex.getMessage());
  }
}
Project: hadoop    File: TestDelegationTokenRenewer.java
@Before
public void setUp() throws Exception {
  counter = new AtomicInteger(0);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  eventQueue = new LinkedBlockingQueue<Event>();
  dispatcher = new AsyncDispatcher(eventQueue);
  Renewer.reset();
  delegationTokenRenewer = createNewDelegationTokenRenewer(conf, counter);
  RMContext mockContext =  mock(RMContext.class);
  ClientRMService mockClientRMService = mock(ClientRMService.class);
  when(mockContext.getSystemCredentialsForApps()).thenReturn(
    new ConcurrentHashMap<ApplicationId, ByteBuffer>());
  when(mockContext.getDelegationTokenRenewer()).thenReturn(
      delegationTokenRenewer);
  when(mockContext.getDispatcher()).thenReturn(dispatcher);
  when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
  InetSocketAddress sockAddr =
      InetSocketAddress.createUnresolved("localhost", 1234);
  when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
  delegationTokenRenewer.setRMContext(mockContext);
  delegationTokenRenewer.init(conf);
  delegationTokenRenewer.start();
}
Project: hadoop    File: Client.java
/**
 * Returns a ConnectionId object. 
 * @param addr Remote address for the connection.
 * @param protocol Protocol for RPC.
 * @param ticket the caller's UserGroupInformation (UGI)
 * @param rpcTimeout timeout for each RPC, in milliseconds
 * @param connectionRetryPolicy retry policy for connection failures; if
 *        null, a fixed-sleep policy is built from the configuration
 * @param conf Configuration object
 * @return A ConnectionId instance
 * @throws IOException
 */
static ConnectionId getConnectionId(InetSocketAddress addr,
    Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
    RetryPolicy connectionRetryPolicy, Configuration conf) throws IOException {

  if (connectionRetryPolicy == null) {
    final int max = conf.getInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
    final int retryInterval = conf.getInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY,
        CommonConfigurationKeysPublic
            .IPC_CLIENT_CONNECT_RETRY_INTERVAL_DEFAULT);

    connectionRetryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        max, retryInterval, TimeUnit.MILLISECONDS);
  }

  return new ConnectionId(addr, protocol, ticket, rpcTimeout,
      connectionRetryPolicy, conf);
}
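When connectionRetryPolicy is null, the method above builds a fixed-sleep policy from the two configuration keys; callers can equally construct the same policy themselves and pass it in. A sketch with illustrative values:

// Attempt the connection up to 10 times, sleeping 1 second between attempts.
RetryPolicy connectionRetryPolicy =
    RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        10, 1000, TimeUnit.MILLISECONDS);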
Project: hadoop    File: TestRMWebServicesDelegationTokens.java
private Injector getKerberosAuthInjector() {
  return Guice.createInjector(new TestServletModule() {
    @Override
    protected void configureServlets() {
      isKerberosAuth = true;
      rmconf.set(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
      rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY,
        httpSpnegoPrincipal);
      rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
        httpSpnegoKeytabFile.getAbsolutePath());
      rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY,
        httpSpnegoPrincipal);
      rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
        httpSpnegoKeytabFile.getAbsolutePath());

      super.configureServlets();
    }
  });
}
Project: hadoop    File: TestRMWebServicesHttpStaticUserPermissions.java
private static void setupAndStartRM() throws Exception {
  Configuration rmconf = new Configuration();
  rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
    YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  rmconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
    ResourceScheduler.class);
  rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
    "kerberos");
  rmconf.set("yarn.resourcemanager.principal", spnegoPrincipal);
  rmconf.set("yarn.resourcemanager.keytab",
      spnegoKeytabFile.getAbsolutePath());
  rmconf.setBoolean("mockrm.webapp.enabled", true);
  UserGroupInformation.setConfiguration(rmconf);
  rm = new MockRM(rmconf);
  rm.start();

}
Project: hadoop    File: TestRMAdminService.java
@Test
public void testServiceAclsRefreshWithLocalConfigurationProvider() {
  configuration.setBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
  ResourceManager resourceManager = null;

  try {
    resourceManager = new ResourceManager();
    resourceManager.init(configuration);
    resourceManager.start();
    resourceManager.adminService.refreshServiceAcls(RefreshServiceAclsRequest
        .newInstance());
  } catch (Exception ex) {
    fail("Using localConfigurationProvider. Should not get any exception.");
  } finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
Project: hadoop    File: TestAMRMClientContainerRequest.java
@Test (expected = InvalidContainerRequestException.class)
public void testDifferentLocalityRelaxationSamePriority() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 =
      new ContainerRequest(capability, new String[] {"host3"},
          null, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
Project: hadoop    File: TestAMRMClientContainerRequest.java
@Test (expected = InvalidContainerRequestException.class)
public void testLocalityRelaxationDifferentLevels() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 =
      new ContainerRequest(capability, null,
          new String[] {"rack1"}, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
Project: hadoop    File: TestQuorumJournalManager.java
@Before
public void setup() throws Exception {
  conf = new Configuration();
  // Don't retry connections - it just slows down the tests.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);

  cluster = new MiniJournalCluster.Builder(conf)
    .build();

  qjm = createSpyingQJM();
  spies = qjm.getLoggerSetForTests().getLoggersForTests();

  qjm.format(QJMTestUtil.FAKE_NSINFO);
  qjm.recoverUnfinalizedSegments();
  assertEquals(1, qjm.getLoggerSetForTests().getEpoch());
}
Project: hadoop    File: ClientServiceDelegate.java
public ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm,
    JobID jobId, MRClientProtocol historyServerProxy) {
  this.conf = new Configuration(conf); // Cloning for modifying.
  // For faster redirects from the AM (ApplicationMaster) to the HS (JobHistoryServer).
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES,
          MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES));
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS,
          MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS));
  this.rm = rm;
  this.jobId = jobId;
  this.historyServerProxy = historyServerProxy;
  this.appId = TypeConverter.toYarn(jobId).getAppId();
  notRunningJobs = new HashMap<JobState, HashMap<String, NotRunningJob>>();
}
Project: hadoop    File: IPFailoverProxyProvider.java
public IPFailoverProxyProvider(Configuration conf, URI uri,
    Class<T> xface) {
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface class %s is not a valid NameNode protocol!",
      xface);
  this.xface = xface;
  this.nameNodeUri = uri;

  this.conf = new Configuration(conf);
  int maxRetries = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      maxRetries);

  int maxRetriesOnSocketTimeouts = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      maxRetriesOnSocketTimeouts);
}
Project: hadoop    File: ClientDatanodeProtocolTranslatorPB.java
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }

  // Since we're creating a new UserGroupInformation here, we know that no
  // future RPC proxies will be able to re-use the same connection. And
  // usages of this proxy tend to be one-off calls.
  //
  // This is a temporary fix: callers should really achieve this by using
  // RPC.stopProxy() on the resulting object, but this is currently not
  // working in trunk. See the discussion on HDFS-1965.
  Configuration confWithNoIpcIdle = new Configuration(conf);
  confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
      .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

  UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
  ticket.addToken(locatedBlock.getBlockToken());
  return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
Project: hadoop    File: IPCLoggerChannel.java
protected QJournalProtocol createProxy() throws IOException {
  final Configuration confCopy = new Configuration(conf);

  // Need to set NODELAY or else batches larger than MTU can trigger 
  // 40ms nagling delays.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
      true);

  RPC.setProtocolEngine(confCopy,
      QJournalProtocolPB.class, ProtobufRpcEngine.class);
  return SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<QJournalProtocol>() {
        @Override
        public QJournalProtocol run() throws IOException {
          RPC.setProtocolEngine(confCopy,
              QJournalProtocolPB.class, ProtobufRpcEngine.class);
          QJournalProtocolPB pbproxy = RPC.getProxy(
              QJournalProtocolPB.class,
              RPC.getProtocolVersion(QJournalProtocolPB.class),
              addr, confCopy);
          return new QJournalProtocolTranslatorPB(pbproxy);
        }
      });
}
Project: hadoop    File: TestParam.java
@Test
public void testBufferSizeParam() {
  final BufferSizeParam p = new BufferSizeParam(BufferSizeParam.DEFAULT);
  Assert.assertNull(p.getValue());
  Assert.assertEquals(
      conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
      p.getValue(conf));

  new BufferSizeParam(1);

  try {
    new BufferSizeParam(0);
    Assert.fail();
  } catch(IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
Project: hadoop    File: TestBootstrapStandbyWithQJM.java
@Before
public void setup() throws Exception {
  Configuration conf = new Configuration();
  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);

  MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).build();
  cluster = miniQjmHaCluster.getDfsCluster();
  jCluster = miniQjmHaCluster.getJournalCluster();

  // make nn0 active
  cluster.transitionToActive(0);
  // write something to generate in-progress edit log data
  DistributedFileSystem dfs = (DistributedFileSystem) 
      HATestUtil.configureFailoverFs(cluster, conf);
  dfs.mkdirs(new Path("/test2"));
  dfs.close();
}
Project: hadoop-oss    File: SaslPropertiesResolver.java
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  properties = new TreeMap<String,String>();
  String[] qop = conf.getTrimmedStrings(
      CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
      QualityOfProtection.AUTHENTICATION.toString());
  for (int i=0; i < qop.length; i++) {
    qop[i] = QualityOfProtection.valueOf(
        StringUtils.toUpperCase(qop[i])).getSaslQop();
  }
  properties.put(Sasl.QOP, StringUtils.join(",", qop));
  properties.put(Sasl.SERVER_AUTH, "true");
}
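A sketch of exercising setConf (assuming the resolver's getDefaultProperties() accessor): hadoop.rpc.protection accepts a comma-separated list, and the QualityOfProtection values map authentication, integrity and privacy to the SASL QOP strings auth, auth-int and auth-conf respectively.

Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, "integrity,privacy");
SaslPropertiesResolver resolver = new SaslPropertiesResolver();
resolver.setConf(conf);
Map<String, String> props = resolver.getDefaultProperties();
System.out.println(props.get(Sasl.QOP)); // expected: "auth-int,auth-conf"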
Project: hadoop-oss    File: ProxyUsers.java
/**
 * Returns an instance of ImpersonationProvider.
 * Looks up the configuration to see if a custom class is specified.
 * @param conf the configuration to read the provider class from
 * @return ImpersonationProvider
 */
private static ImpersonationProvider getInstance(Configuration conf) {
  Class<? extends ImpersonationProvider> clazz =
      conf.getClass(
          CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS,
          DefaultImpersonationProvider.class, ImpersonationProvider.class);
  return ReflectionUtils.newInstance(clazz, conf);
}
Project: hadoop-oss    File: HAServiceTarget.java
/**
 * @return a proxy to the ZKFC which is associated with this HA service.
 */
public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs)
    throws IOException {
  Configuration confCopy = new Configuration(conf);
  // Lower the connect retry count so we fail fast when the target is down
  confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
  return new ZKFCProtocolClientSideTranslatorPB(
      getZKFCAddress(),
      confCopy, factory, timeoutMs);
}
Project: hadoop-oss    File: Client.java
ConnectionId(InetSocketAddress address, Class<?> protocol, 
             UserGroupInformation ticket, int rpcTimeout,
             RetryPolicy connectionRetryPolicy, Configuration conf) {
  this.protocol = protocol;
  this.address = address;
  this.ticket = ticket;
  this.rpcTimeout = rpcTimeout;
  this.connectionRetryPolicy = connectionRetryPolicy;

  this.maxIdleTime = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
  this.maxRetriesOnSasl = conf.getInt(
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
  this.maxRetriesOnSocketTimeouts = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.tcpNoDelay = conf.getBoolean(
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT);
  this.tcpLowLatency = conf.getBoolean(
      CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY,
      CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY_DEFAULT
      );
  this.doPing = conf.getBoolean(
      CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
      CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT);
  this.pingInterval = (doPing ? Client.getPingInterval(conf) : 0);
  this.conf = conf;
}
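The client-side knobs read by this constructor can be set before any proxy is created. A sketch favouring latency over connection reuse (illustrative values):

Configuration conf = new Configuration();
// Disable Nagle's algorithm on IPC sockets.
conf.setBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY, true);
// Request the low-latency treatment guarded by ipc.client.low-latency.
conf.setBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY, true);
// Tear down idle connections after 5 seconds.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 5000);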
Project: hadoop    File: TestDFSAdminWithHA.java
private void setHAConf(Configuration conf, String nn1Addr, String nn2Addr) {
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      "hdfs://" + NSID);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);
  conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  conf.set(DFSUtil.addKeySuffixes(
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn1"), nn1Addr);
  conf.set(DFSUtil.addKeySuffixes(
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn2"), nn2Addr);
}
Project: hadoop-oss    File: TestProxyUsers.java
@Test
public void testProxyUsersWithProviderOverride() throws Exception {
  Configuration conf = new Configuration();
  conf.set(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS,
      "org.apache.hadoop.security.authorize.TestProxyUsers$TestDummyImpersonationProvider");
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

  // First try proxying a group that's allowed
  UserGroupInformation realUserUgi = UserGroupInformation
  .createUserForTesting(REAL_USER_NAME, SUDO_GROUP_NAMES);
  UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
      PROXY_USER_NAME, realUserUgi, GROUP_NAMES);

  // From good IP
  assertAuthorized(proxyUserUgi, "1.2.3.4");
  // From bad IP
  assertAuthorized(proxyUserUgi, "1.2.3.5");

  // Now try proxying a group that's not allowed
  realUserUgi = UserGroupInformation
  .createUserForTesting(REAL_USER_NAME, GROUP_NAMES);
  proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
      PROXY_USER_NAME, realUserUgi, GROUP_NAMES);

  // From good IP
  assertNotAuthorized(proxyUserUgi, "1.2.3.4");
  // From bad IP
  assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
Project: hadoop-oss    File: TestUserGroupInformation.java
/** Test hasSufficientTimeElapsed method */
@Test (timeout = 30000)
public void testHasSufficientTimeElapsed() throws Exception {
  // Make hasSufficientTimeElapsed accessible via reflection
  Method method = UserGroupInformation.class
          .getDeclaredMethod("hasSufficientTimeElapsed", long.class);
  method.setAccessible(true);

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  User user = ugi.getSubject().getPrincipals(User.class).iterator().next();
  long now = System.currentTimeMillis();

  // Using default relogin time (1 minute)
  user.setLastLogin(now - 2 * 60 * 1000);  // 2 minutes before "now"
  assertTrue((Boolean)method.invoke(ugi, now));
  user.setLastLogin(now - 30 * 1000);      // 30 seconds before "now"
  assertFalse((Boolean)method.invoke(ugi, now));

  // Using relogin time of 10 minutes
  Configuration conf2 = new Configuration(conf);
  conf2.setLong(
     CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN,
     10 * 60);
  UserGroupInformation.setConfiguration(conf2);
  user.setLastLogin(now - 15 * 60 * 1000); // 15 minutes before "now"
  assertTrue((Boolean)method.invoke(ugi, now));
  user.setLastLogin(now - 6 * 60 * 1000);  // 6 minutes before "now"
  assertFalse((Boolean)method.invoke(ugi, now));
  // Restore original conf to UGI
  UserGroupInformation.setConfiguration(conf);

  // Restore hasSufficientTimeElapsed back to inaccessible
  method.setAccessible(false);
}
Project: hadoop-oss    File: TestHttpServerLogs.java
@Test
public void testLogsEnabled() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(
      CommonConfigurationKeysPublic.HADOOP_HTTP_LOGS_ENABLED, true);
  startServer(conf);
  URL url = new URL("http://"
      + NetUtils.getHostPortString(server.getConnectorAddress(0)) + "/logs");
  HttpURLConnection conn = (HttpURLConnection)url.openConnection();
  assertEquals(HttpStatus.SC_OK, conn.getResponseCode());
}
Project: hadoop-oss    File: TestHttpServerLogs.java
@Test
public void testLogsDisabled() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(
      CommonConfigurationKeysPublic.HADOOP_HTTP_LOGS_ENABLED, false);
  startServer(conf);
  URL url = new URL(baseUrl + "/logs");
  HttpURLConnection conn = (HttpURLConnection)url.openConnection();
  assertEquals(HttpStatus.SC_NOT_FOUND, conn.getResponseCode());
}
Project: hadoop-oss    File: TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
@BeforeClass
public static void init() throws Exception {
  GenericTestUtils.assumeInNativeProfile();
  Configuration conf = new Configuration();
  conf.set(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
      OpensslAesCtrCryptoCodec.class.getName());
  codec = CryptoCodec.getInstance(conf);
  assertNotNull("Unable to instantiate codec " +
      OpensslAesCtrCryptoCodec.class.getName() + ", is the required "
      + "version of OpenSSL installed?", codec);
  assertEquals(OpensslAesCtrCryptoCodec.class.getCanonicalName(),
      codec.getClass().getCanonicalName());
}
Project: hadoop-oss    File: TestCryptoStreamsWithJceAesCtrCryptoCodec.java
@BeforeClass
public static void init() throws Exception {
  Configuration conf = new Configuration();
  conf.set(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
      JceAesCtrCryptoCodec.class.getName());
  codec = CryptoCodec.getInstance(conf);
  Assert.assertEquals(JceAesCtrCryptoCodec.class.getCanonicalName(),
      codec.getClass().getCanonicalName());
}
Project: hadoop-oss    File: TestIPC.java
@Test(timeout=60000)
public void testConnectionRetriesOnSocketTimeoutExceptions() throws IOException {
  Configuration conf = new Configuration();
  // set max retries to 0
  conf.setInt(
    CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
    0);
  assertRetriesOnSocketTimeouts(conf, 1);

  // set max retries to 3
  conf.setInt(
    CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
    3);
  assertRetriesOnSocketTimeouts(conf, 4);
}
Project: hadoop    File: TestRackResolverScriptBasedMapping.java
@Test
public void testScriptName() {
  Configuration conf = new Configuration();
  conf
      .setClass(
          CommonConfigurationKeysPublic.
              NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class, DNSToSwitchMapping.class);
  conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
      "testScript");
  RackResolver.init(conf);
  Assert.assertEquals("script-based mapping with script testScript",
      RackResolver.getDnsToSwitchMapping().toString());
}