Java class org.apache.hadoop.yarn.api.records.QueueState: example usages
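QueueState is the YARN enum describing whether a scheduler queue accepts new applications; in the Hadoop lines sampled here its values are principally RUNNING and STOPPED. The snippets below come from several Hadoop forks and fall into three groups: schedulers stamping QueueState.RUNNING onto the QueueInfo records they return, CLI and web-service tests asserting on the reported state, and TypeConverter tests mapping the enum to its MapReduce counterpart. As a quick orientation, here is a minimal client-side sketch; the standalone class and the queue name "default" are illustrative and not taken from any of the projects below:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class QueueStateCheck {
  public static void main(String[] args) throws IOException, YarnException {
    // Illustrative only: connect to the ResourceManager and read one queue's state.
    YarnClient client = YarnClient.createYarnClient();
    client.init(new Configuration());
    client.start();
    try {
      QueueInfo info = client.getQueueInfo("default"); // queue name is illustrative
      if (info.getQueueState() == QueueState.RUNNING) {
        System.out.println("Queue is accepting applications");
      } else {
        System.out.println("Queue state: " + info.getQueueState());
      }
    } finally {
      client.stop();
    }
  }
}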

Project: hadoop    File: FifoScheduler.java
@Override
public QueueInfo getQueueInfo( 
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
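Since the FIFO scheduler manages only the single built-in default queue and has no notion of stopping it, this method unconditionally reports QueueState.RUNNING and hard-codes capacity and maximum capacity to the full cluster (1.0f); only the current capacity is computed from live memory usage. The same pattern recurs in the forks below.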
Project: hadoop    File: TestRMWebServices.java
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);

}
Project: hadoop    File: TestYarnCLI.java
@Test
public void testGetQueueInfo() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  Set<String> nodeLabels = new HashSet<String>();
  nodeLabels.add("GPU");
  nodeLabels.add("JDK_7");
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, nodeLabels, "GPU");
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : " + "GPU");
  pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Project: hadoop    File: TestYarnCLI.java
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : ");
  pw.println("\tAccessible Node Labels : ");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Project: hadoop    File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
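For orientation, the QueueState loop above exercises TypeConverter's mapping from the YARN enum to org.apache.hadoop.mapreduce.QueueState. A minimal sketch of that round trip; the printout is illustrative and not part of the test:

for (QueueState queueState : QueueState.values()) {
  // TypeConverter.fromYarn maps the YARN enum to the MapReduce-side enum.
  org.apache.hadoop.mapreduce.QueueState converted =
      TypeConverter.fromYarn(queueState);
  System.out.println(queueState + " -> " + converted);
}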
Project: hadoop    File: TestTypeConverter.java
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
    new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
    TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
    1, returned.getQueueChildren().size());
}
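As a hypothetical follow-up to the assertion above (assuming the converted record's standard accessors, which the test itself does not use), the converted children could be inspected like this:

for (org.apache.hadoop.mapreduce.QueueInfo c : returned.getQueueChildren()) {
  // Each child here was converted from a YARN QueueInfo by fromYarn above.
  System.out.println(c.getQueueName() + " : " + c.getState());
}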
Project: aliyun-oss-hadoop-fs    File: FifoScheduler.java
@Override
public QueueInfo getQueueInfo( 
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Project: aliyun-oss-hadoop-fs    File: TestRMWebServices.java
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);

}
Project: aliyun-oss-hadoop-fs    File: TestYarnCLI.java
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null, null, true);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : "
      + NodeLabel.DEFAULT_NODE_LABEL_PARTITION);
  pw.println("\tAccessible Node Labels : ");
  pw.println("\tPreemption : " + "disabled");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Project: aliyun-oss-hadoop-fs    File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestTypeConverter.java
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
    new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
    TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
    1, returned.getQueueChildren().size());
}
Project: big-c    File: FifoScheduler.java
@Override
public QueueInfo getQueueInfo( 
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Project: big-c    File: TestRMWebServices.java
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);

}
Project: big-c    File: TestYarnCLI.java
@Test
public void testGetQueueInfo() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  Set<String> nodeLabels = new HashSet<String>();
  nodeLabels.add("GPU");
  nodeLabels.add("JDK_7");
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, nodeLabels, "GPU");
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : " + "GPU");
  pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Project: big-c    File: TestYarnCLI.java
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : ");
  pw.println("\tAccessible Node Labels : ");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Project: big-c    File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Project: big-c    File: TestTypeConverter.java
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
    new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
    TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
    1, returned.getQueueChildren().size());
}
Project: hadoop-2.6.0-cdh5.4.3    File: FifoScheduler.java
@Override
public QueueInfo getQueueInfo( 
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestRMWebServices.java
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);

}
Project: hadoop-2.6.0-cdh5.4.3    File: TestYarnCLI.java
@Test
public void testGetQueueInfo() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  Set<String> nodeLabels = new HashSet<String>();
  nodeLabels.add("GPU");
  nodeLabels.add("JDK_7");
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, nodeLabels, "GPU");
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : " + "GPU");
  pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestYarnCLI.java
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : ");
  pw.println("\tAccessible Node Labels : ");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTypeConverter.java
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
    new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
    TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
    1, returned.getQueueChildren().size());
}
Project: hadoop-plus    File: FifoScheduler.java
@Override
public QueueInfo getQueueInfo( 
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Project: hadoop-plus    File: FSQueue.java
@Override
public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(getQueueName());
  // TODO: we might change these queue metrics around a little bit
  // to match the semantics of the fair scheduler.
  queueInfo.setCapacity((float) getFairShare().getMemory() /
      scheduler.getClusterCapacity().getMemory());
  // Current capacity is driven by actual usage rather than the fair share,
  // so it must go through setCurrentCapacity(), not a second setCapacity().
  queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() /
      scheduler.getClusterCapacity().getMemory());

  ArrayList<QueueInfo> childQueueInfos = new ArrayList<QueueInfo>();
  if (includeChildQueues) {
    Collection<FSQueue> childQueues = getChildQueues();
    for (FSQueue child : childQueues) {
      childQueueInfos.add(child.getQueueInfo(recursive, recursive));
    }
  }
  queueInfo.setChildQueues(childQueueInfos);
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Project: hadoop-plus    File: TestRMWebServices.java
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);

}
Project: hadoop-plus    File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Project: hadoop-plus    File: TestTypeConverter.java
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
    new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
    TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
    1, returned.getQueueChildren().size());
}
Project: FlexMap    File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Project: FlexMap    File: TestTypeConverter.java
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
    new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
    TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
    1, returned.getQueueChildren().size());
}
Project: hops    File: FifoScheduler.java
@Override
public QueueInfo getQueueInfo( 
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemorySize() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemorySize()
        / clusterResource.getMemorySize());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Project: hops    File: TestRMWebServices.java
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);

}
Project: hops    File: TestYarnCLI.java
@Test
public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
  QueueCLI cli = createAndGetQueueCLI();
  QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
      null, null, QueueState.RUNNING, null, null, null, true);
  when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
  int result = cli.run(new String[] { "-status", "queueA" });
  assertEquals(0, result);
  verify(client).getQueueInfo("queueA");
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Queue Information : ");
  pw.println("Queue Name : " + "queueA");
  pw.println("\tState : " + "RUNNING");
  pw.println("\tCapacity : " + "40.0%");
  pw.println("\tCurrent Capacity : " + "50.0%");
  pw.println("\tMaximum Capacity : " + "80.0%");
  pw.println("\tDefault Node Label expression : "
      + NodeLabel.DEFAULT_NODE_LABEL_PARTITION);
  pw.println("\tAccessible Node Labels : ");
  pw.println("\tPreemption : " + "disabled");
  pw.close();
  String queueInfoStr = baos.toString("UTF-8");
  Assert.assertEquals(queueInfoStr, sysOutStream.toString());
}
Project: hops    File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Project: hops    File: TestTypeConverter.java
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
    new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
    TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
    1, returned.getQueueChildren().size());
}
Project: hadoop-TCP    File: FifoScheduler.java
@Override
public QueueInfo getQueueInfo( 
    boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
  queueInfo.setCapacity(1.0f);
  if (clusterResource.getMemory() == 0) {
    queueInfo.setCurrentCapacity(0.0f);
  } else {
    queueInfo.setCurrentCapacity((float) usedResource.getMemory()
        / clusterResource.getMemory());
  }
  queueInfo.setMaximumCapacity(1.0f);
  queueInfo.setChildQueues(new ArrayList<QueueInfo>());
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Project: hadoop-TCP    File: FSQueue.java
@Override
public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
  QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
  queueInfo.setQueueName(getQueueName());
  // TODO: we might change these queue metrics around a little bit
  // to match the semantics of the fair scheduler.
  queueInfo.setCapacity((float) getFairShare().getMemory() /
      scheduler.getClusterCapacity().getMemory());
  // Current capacity is driven by actual usage rather than the fair share,
  // so it must go through setCurrentCapacity(), not a second setCapacity().
  queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() /
      scheduler.getClusterCapacity().getMemory());

  ArrayList<QueueInfo> childQueueInfos = new ArrayList<QueueInfo>();
  if (includeChildQueues) {
    Collection<FSQueue> childQueues = getChildQueues();
    for (FSQueue child : childQueues) {
      childQueueInfos.add(child.getQueueInfo(recursive, recursive));
    }
  }
  queueInfo.setChildQueues(childQueueInfos);
  queueInfo.setQueueState(QueueState.RUNNING);
  return queueInfo;
}
Project: hadoop-TCP    File: TestRMWebServices.java
public void verifyClusterSchedulerFifoGeneric(String type, String state,
    float capacity, float usedCapacity, int minQueueCapacity,
    int maxQueueCapacity, int numNodes, int usedNodeCapacity,
    int availNodeCapacity, int totalNodeCapacity, int numContainers)
    throws JSONException, Exception {

  assertEquals("type doesn't match", "fifoScheduler", type);
  assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), state);
  assertEquals("capacity doesn't match", 1.0, capacity, 0.0);
  assertEquals("usedCapacity doesn't match", 0.0, usedCapacity, 0.0);
  assertEquals(
      "minQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      minQueueCapacity);
  assertEquals("maxQueueMemoryCapacity doesn't match",
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      maxQueueCapacity);
  assertEquals("numNodes doesn't match", 0, numNodes);
  assertEquals("usedNodeCapacity doesn't match", 0, usedNodeCapacity);
  assertEquals("availNodeCapacity doesn't match", 0, availNodeCapacity);
  assertEquals("totalNodeCapacity doesn't match", 0, totalNodeCapacity);
  assertEquals("numContainers doesn't match", 0, numContainers);

}
Project: hadoop-TCP    File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));

  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }

  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }

  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }

  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
Project: hadoop-TCP    File: TestTypeConverter.java
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test
public void testFromYarnQueue() {
  //Define child queue
  org.apache.hadoop.yarn.api.records.QueueInfo child =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);

  //Define parent queue
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
    Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
    new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child); //Add one child
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);

  //Call the function we're testing
  org.apache.hadoop.mapreduce.QueueInfo returned =
    TypeConverter.fromYarn(queueInfo, new Configuration());

  //Verify that the converted queue has the 1 child we had added
  Assert.assertEquals("QueueInfo children weren't properly converted",
    1, returned.getQueueChildren().size());
}