Example source code for the Java class org.apache.hadoop.mapred.Counters.Counter

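The snippets below are collected from several Hadoop-derived projects and show how org.apache.hadoop.mapred.Counters.Counter is created, looked up, incremented, and iterated in test code. As a quick orientation, here is a minimal, self-contained sketch of the same deprecated mapred-era API; the class name CounterUsageSketch and the group/counter names "MyGroup" and "MyCounter" are illustrative placeholders, not taken from the projects listed below.

import java.util.Iterator;

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;

public class CounterUsageSketch {
  public static void main(String[] args) {
    // The mapred-era Counters container; the tests below exercise it both
    // through this deprecated API and through the newer mapreduce names.
    Counters counters = new Counters();

    // findCounter(group, name) creates the counter on first use.
    Counter counter = counters.findCounter("MyGroup", "MyCounter");
    counter.increment(1);
    counter.setValue(42);
    System.out.println("MyCounter = " + counter.getValue());

    // incrCounter is a shorthand for findCounter(...).increment(...).
    counters.incrCounter("MyGroup", "MyCounter", 8);

    // Groups can be fetched by name and iterated, as the
    // iterator-concurrency tests below do.
    Group group = counters.getGroup("MyGroup");
    for (Iterator<Counter> it = group.iterator(); it.hasNext();) {
      Counter c = it.next();
      System.out.println(c.getName() + " = " + c.getValue());
    }
  }
}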
Project: hadoop    File: TestCounters.java
@SuppressWarnings("deprecation")
private void checkLegacyNames(Counters counters) {
  assertEquals("New name", 1, counters.findCounter(
      TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.Task$Counter",
      "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.JobInProgress$Counter",
      "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue());
  assertEquals("New name and method", 1, counters.findCounter("file",
      FileSystemCounter.BYTES_READ).getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "FileSystemCounters",
      "FILE_BYTES_READ").getValue());
}
Project: hadoop    File: TestCounters.java
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);

  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Project: hadoop    File: TestCounters.java
@SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  FrameworkGroupFactory frameworkGroupFactory = 
      groupFactory.newFrameworkGroupFactory(JobCounter.class);
  Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");

  FrameworkCounterGroup counterGroup = 
      (FrameworkCounterGroup) group.getUnderlyingGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      counterGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: aliyun-oss-hadoop-fs    File: TestCounters.java
@SuppressWarnings("deprecation")
private void checkLegacyNames(Counters counters) {
  assertEquals("New name", 1, counters.findCounter(
      TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.Task$Counter",
      "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.JobInProgress$Counter",
      "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue());
  assertEquals("New name and method", 1, counters.findCounter("file",
      FileSystemCounter.BYTES_READ).getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "FileSystemCounters",
      "FILE_BYTES_READ").getValue());
}
Project: aliyun-oss-hadoop-fs    File: TestCounters.java
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);

  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Project: aliyun-oss-hadoop-fs    File: TestCounters.java
@SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  FrameworkGroupFactory frameworkGroupFactory = 
      groupFactory.newFrameworkGroupFactory(JobCounter.class);
  Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");

  FrameworkCounterGroup counterGroup = 
      (FrameworkCounterGroup) group.getUnderlyingGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      counterGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: big-c    File: TestCounters.java
@SuppressWarnings("deprecation")
private void checkLegacyNames(Counters counters) {
  assertEquals("New name", 1, counters.findCounter(
      TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.Task$Counter",
      "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.JobInProgress$Counter",
      "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue());
  assertEquals("New name and method", 1, counters.findCounter("file",
      FileSystemCounter.BYTES_READ).getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "FileSystemCounters",
      "FILE_BYTES_READ").getValue());
}
Project: big-c    File: TestCounters.java
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);

  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Project: big-c    File: TestCounters.java
@SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  FrameworkGroupFactory frameworkGroupFactory = 
      groupFactory.newFrameworkGroupFactory(JobCounter.class);
  Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");

  FrameworkCounterGroup counterGroup = 
      (FrameworkCounterGroup) group.getUnderlyingGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      counterGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCounters.java
@SuppressWarnings("deprecation")
private void checkLegacyNames(Counters counters) {
  assertEquals("New name", 1, counters.findCounter(
      TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.Task$Counter",
      "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.JobInProgress$Counter",
      "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue());
  assertEquals("New name and method", 1, counters.findCounter("file",
      FileSystemCounter.BYTES_READ).getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "FileSystemCounters",
      "FILE_BYTES_READ").getValue());
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCounters.java
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);

  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCounters.java
@SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  FrameworkGroupFactory frameworkGroupFactory = 
      groupFactory.newFrameworkGroupFactory(JobCounter.class);
  Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");

  FrameworkCounterGroup counterGroup = 
      (FrameworkCounterGroup) group.getUnderlyingGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      counterGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestMerger.java
@SuppressWarnings( { "deprecation", "unchecked" })
public void testMergeShouldReturnProperProgress(
    List<Segment<Text, Text>> segments) throws IOException {
  Configuration conf = new Configuration();
  JobConf jobConf = new JobConf();
  FileSystem fs = FileSystem.getLocal(conf);
  Path tmpDir = new Path("localpath");
  Class<Text> keyClass = (Class<Text>) jobConf.getMapOutputKeyClass();
  Class<Text> valueClass = (Class<Text>) jobConf.getMapOutputValueClass();
  RawComparator<Text> comparator = jobConf.getOutputKeyComparator();
  Counter readsCounter = new Counter();
  Counter writesCounter = new Counter();
  RawKeyValueIterator mergeQueue = Merger.merge(conf, fs, keyClass,
      valueClass, segments, 2, tmpDir, comparator, getReporter(),
      readsCounter, writesCounter);
  Assert.assertEquals(1.0f, mergeQueue.getProgress().get());
}
Project: hops    File: TestCounters.java
@SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  FrameworkGroupFactory frameworkGroupFactory = 
      groupFactory.newFrameworkGroupFactory(JobCounter.class);
  Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");

  FrameworkCounterGroup counterGroup = 
      (FrameworkCounterGroup) group.getUnderlyingGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      counterGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: hadoop-plus    File: TestCounters.java
@SuppressWarnings("deprecation")
private void checkLegacyNames(Counters counters) {
  assertEquals("New name", 1, counters.findCounter(
      TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.Task$Counter",
      "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.JobInProgress$Counter",
      "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue());
  assertEquals("New name and method", 1, counters.findCounter("file",
      FileSystemCounter.BYTES_READ).getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "FileSystemCounters",
      "FILE_BYTES_READ").getValue());
}
Project: hadoop-plus    File: TestCounters.java
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);

  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Project: FlexMap    File: TestCounters.java
@SuppressWarnings("deprecation")
private void checkLegacyNames(Counters counters) {
  assertEquals("New name", 1, counters.findCounter(
      TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.Task$Counter",
      "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.JobInProgress$Counter",
      "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue());

  assertEquals("New name", 1, counters.findCounter(
      FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue());
  assertEquals("New name and method", 1, counters.findCounter("file",
      FileSystemCounter.BYTES_READ).getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "FileSystemCounters",
      "FILE_BYTES_READ").getValue());
}
Project: FlexMap    File: TestCounters.java
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);

  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Project: FlexMap    File: TestCounters.java
@SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  FrameworkGroupFactory frameworkGroupFactory = 
      groupFactory.newFrameworkGroupFactory(JobCounter.class);
  Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");

  FrameworkCounterGroup counterGroup = 
      (FrameworkCounterGroup) group.getUnderlyingGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      counterGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: hops    File: TestCounters.java
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);

  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Project: hadoop    File: TestPipeApplication.java
public Counters.Counter getCounter(String group, String name) {
  Counters.Counter counter = null;
  if (counters != null) {
    counter = counters.findCounter(group, name);
    if (counter == null) {
      Group grp = counters.addGroup(group, group);
      counter = grp.addCounter(name, name, 10);
    }
  }
  return counter;
}
Project: hadoop    File: TestCounters.java
/**
 * Verify that counter values are set and incremented correctly
 */
@SuppressWarnings("deprecation")
@Test
public void testCounterValue() {
  Counters counters = new Counters();
  final int NUMBER_TESTS = 100;
  final int NUMBER_INC = 10;
  final Random rand = new Random();
  for (int i = 0; i < NUMBER_TESTS; i++) {
    long initValue = rand.nextInt();
    long expectedValue = initValue;
    Counter counter = counters.findCounter("foo", "bar");
    counter.setValue(initValue);
    assertEquals("Counter value is not initialized correctly",
                 expectedValue, counter.getValue());
    for (int j = 0; j < NUMBER_INC; j++) {
      int incValue = rand.nextInt();
      counter.increment(incValue);
      expectedValue += incValue;
      assertEquals("Counter value is not incremented correctly",
                   expectedValue, counter.getValue());
    }
    expectedValue = rand.nextInt();
    counter.setValue(expectedValue);
    assertEquals("Counter value is not set correctly",
                 expectedValue, counter.getValue());
  }
}
Project: hadoop    File: TestCounters.java
@SuppressWarnings("deprecation")
@Test
public void testWriteWithLegacyNames() {
  Counters counters = new Counters();
  counters.incrCounter(Task.Counter.MAP_INPUT_RECORDS, 1);
  counters.incrCounter(JobInProgress.Counter.DATA_LOCAL_MAPS, 1);
  counters.findCounter("FileSystemCounters", "FILE_BYTES_READ").increment(1);

  checkLegacyNames(counters);
}
Project: hadoop    File: TestCounters.java
@SuppressWarnings("deprecation")
@Test
public void testGroupIteratorConcurrency() {
  Counters counters = new Counters();
  counters.incrCounter("group1", "counter1", 1);
  Group group = counters.getGroup("group1");
  Iterator<Counter> iterator = group.iterator();
  counters.incrCounter("group1", "counter2", 1);
  iterator.next();
}
Project: hadoop    File: TestCounters.java
@Test
public void testFilesystemCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  Group fsGroup = groupFactory.newFileSystemGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      fsGroup.findCounter("ANY_BYTES_READ");
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      fsGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: hadoop    File: TestStreamingCounters.java
private void validateCounters() throws IOException {
  Counters counters = job.running_.getCounters();
  assertNotNull("Counters", counters);
  Group group = counters.getGroup("UserCounters");
  assertNotNull("Group", group);
  Counter counter = group.getCounterForName("InputLines");
  assertNotNull("Counter", counter);
  assertEquals(3, counter.getCounter());
}
Project: aliyun-oss-hadoop-fs    File: TestPipeApplication.java
public Counters.Counter getCounter(String group, String name) {
  Counters.Counter counter = null;
  if (counters != null) {
    counter = counters.findCounter(group, name);
    if (counter == null) {
      Group grp = counters.addGroup(group, group);
      counter = grp.addCounter(name, name, 10);
    }
  }
  return counter;
}
Project: aliyun-oss-hadoop-fs    File: CombinerHandler.java
public static <K, V> ICombineHandler create(TaskContext context)
  throws IOException, ClassNotFoundException {
  final JobConf conf = new JobConf(context.getConf());
  conf.set(Constants.SERIALIZATION_FRAMEWORK,
      String.valueOf(SerializationFramework.WRITABLE_SERIALIZATION.getType()));
  String combinerClazz = conf.get(Constants.MAPRED_COMBINER_CLASS);
  if (null == combinerClazz) {
    combinerClazz = conf.get(MRJobConfig.COMBINE_CLASS_ATTR);
  }

  if (null == combinerClazz) {
    return null;
  } else {
    LOG.info("NativeTask Combiner is enabled, class = " + combinerClazz);
  }

  final Counter combineInputCounter = context.getTaskReporter().getCounter(
      TaskCounter.COMBINE_INPUT_RECORDS);

  final CombinerRunner<K, V> combinerRunner = CombinerRunner.create(
      conf, context.getTaskAttemptId(),
      combineInputCounter, context.getTaskReporter(), null);

  final INativeHandler nativeHandler = NativeBatchProcessor.create(
    NAME, conf, DataChannel.INOUT);
  @SuppressWarnings("unchecked")
  final BufferPusher<K, V> pusher = new BufferPusher<K, V>((Class<K>)context.getInputKeyClass(),
      (Class<V>)context.getInputValueClass(),
      nativeHandler);
  final BufferPuller puller = new BufferPuller(nativeHandler);
  return new CombinerHandler<K, V>(nativeHandler, combinerRunner, puller, pusher);
}
Project: aliyun-oss-hadoop-fs    File: TestCounters.java
/**
 * Verify that counter values are set and incremented correctly
 */
@SuppressWarnings("deprecation")
@Test
public void testCounterValue() {
  Counters counters = new Counters();
  final int NUMBER_TESTS = 100;
  final int NUMBER_INC = 10;
  final Random rand = new Random();
  for (int i = 0; i < NUMBER_TESTS; i++) {
    long initValue = rand.nextInt();
    long expectedValue = initValue;
    Counter counter = counters.findCounter("foo", "bar");
    counter.setValue(initValue);
    assertEquals("Counter value is not initialized correctly",
                 expectedValue, counter.getValue());
    for (int j = 0; j < NUMBER_INC; j++) {
      int incValue = rand.nextInt();
      counter.increment(incValue);
      expectedValue += incValue;
      assertEquals("Counter value is not incremented correctly",
                   expectedValue, counter.getValue());
    }
    expectedValue = rand.nextInt();
    counter.setValue(expectedValue);
    assertEquals("Counter value is not set correctly",
                 expectedValue, counter.getValue());
  }
}
Project: aliyun-oss-hadoop-fs    File: TestCounters.java
@SuppressWarnings("deprecation")
@Test
public void testWriteWithLegacyNames() {
  Counters counters = new Counters();
  counters.incrCounter(Task.Counter.MAP_INPUT_RECORDS, 1);
  counters.incrCounter(JobInProgress.Counter.DATA_LOCAL_MAPS, 1);
  counters.findCounter("FileSystemCounters", "FILE_BYTES_READ").increment(1);

  checkLegacyNames(counters);
}
Project: aliyun-oss-hadoop-fs    File: TestCounters.java
@SuppressWarnings("deprecation")
@Test
public void testGroupIteratorConcurrency() {
  Counters counters = new Counters();
  counters.incrCounter("group1", "counter1", 1);
  Group group = counters.getGroup("group1");
  Iterator<Counter> iterator = group.iterator();
  counters.incrCounter("group1", "counter2", 1);
  iterator.next();
}
Project: aliyun-oss-hadoop-fs    File: TestCounters.java
@Test
public void testFilesystemCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  Group fsGroup = groupFactory.newFileSystemGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      fsGroup.findCounter("ANY_BYTES_READ");
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      fsGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: aliyun-oss-hadoop-fs    File: TestStreamingCounters.java
private void validateCounters() throws IOException {
  Counters counters = job.running_.getCounters();
  assertNotNull("Counters", counters);
  Group group = counters.getGroup("UserCounters");
  assertNotNull("Group", group);
  Counter counter = group.getCounterForName("InputLines");
  assertNotNull("Counter", counter);
  assertEquals(3, counter.getCounter());
}
Project: big-c    File: TestPipeApplication.java
public Counters.Counter getCounter(String group, String name) {
  Counters.Counter counter = null;
  if (counters != null) {
    counter = counters.findCounter(group, name);
    if (counter == null) {
      Group grp = counters.addGroup(group, group);
      counter = grp.addCounter(name, name, 10);
    }
  }
  return counter;
}
Project: big-c    File: TestCounters.java
/**
 * Verify that counter values are set and incremented correctly
 */
@SuppressWarnings("deprecation")
@Test
public void testCounterValue() {
  Counters counters = new Counters();
  final int NUMBER_TESTS = 100;
  final int NUMBER_INC = 10;
  final Random rand = new Random();
  for (int i = 0; i < NUMBER_TESTS; i++) {
    long initValue = rand.nextInt();
    long expectedValue = initValue;
    Counter counter = counters.findCounter("foo", "bar");
    counter.setValue(initValue);
    assertEquals("Counter value is not initialized correctly",
                 expectedValue, counter.getValue());
    for (int j = 0; j < NUMBER_INC; j++) {
      int incValue = rand.nextInt();
      counter.increment(incValue);
      expectedValue += incValue;
      assertEquals("Counter value is not incremented correctly",
                   expectedValue, counter.getValue());
    }
    expectedValue = rand.nextInt();
    counter.setValue(expectedValue);
    assertEquals("Counter value is not set correctly",
                 expectedValue, counter.getValue());
  }
}
Project: big-c    File: TestCounters.java
@SuppressWarnings("deprecation")
@Test
public void testWriteWithLegacyNames() {
  Counters counters = new Counters();
  counters.incrCounter(Task.Counter.MAP_INPUT_RECORDS, 1);
  counters.incrCounter(JobInProgress.Counter.DATA_LOCAL_MAPS, 1);
  counters.findCounter("FileSystemCounters", "FILE_BYTES_READ").increment(1);

  checkLegacyNames(counters);
}
Project: big-c    File: TestCounters.java
@SuppressWarnings("deprecation")
@Test
public void testGroupIteratorConcurrency() {
  Counters counters = new Counters();
  counters.incrCounter("group1", "counter1", 1);
  Group group = counters.getGroup("group1");
  Iterator<Counter> iterator = group.iterator();
  counters.incrCounter("group1", "counter2", 1);
  iterator.next();
}
Project: big-c    File: TestCounters.java
@Test
public void testFilesystemCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  Group fsGroup = groupFactory.newFileSystemGroup();

  org.apache.hadoop.mapreduce.Counter count1 = 
      fsGroup.findCounter("ANY_BYTES_READ");
  Assert.assertNotNull(count1);

  // Verify that no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 = 
      fsGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Project: big-c    File: TestStreamingCounters.java
private void validateCounters() throws IOException {
  Counters counters = job.running_.getCounters();
  assertNotNull("Counters", counters);
  Group group = counters.getGroup("UserCounters");
  assertNotNull("Group", group);
  Counter counter = group.getCounterForName("InputLines");
  assertNotNull("Counter", counter);
  assertEquals(3, counter.getCounter());
}