Java 类org.apache.hadoop.mapred.BackupStore 实例源码

项目:hadoop    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:aliyun-oss-hadoop-fs    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:big-c    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:hadoop-plus    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:FlexMap    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:hops    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:hadoop-TCP    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:hardfs    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:hadoop-on-lustre2    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:mapreduce-fork    文件:ReduceContextImpl.java   
@Override
public void mark() throws IOException {
  // The backup store is created lazily on the first call to mark().
  // NOTE: this variant checks the field directly rather than going
  // through getBackupStore().
  if (backupStore == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (inReset) {
    // Already replaying records from the backup store: delegate the mark.
    backupStore.mark();
    return;
  }
  backupStore.reinitialize();
  if (currentKeyLength == -1) {
    // next() has not been called yet, so there is no current record
    // to copy into the backup store.
    return;
  }
  assert (currentValueLength != -1);
  // Space required: key and value bytes plus their vint length prefixes.
  final int neededBytes = currentKeyLength
      + WritableUtils.getVIntSize(currentKeyLength)
      + currentValueLength
      + WritableUtils.getVIntSize(currentValueLength);
  final DataOutputStream backupOut = backupStore.getOutputStream(neededBytes);
  writeFirstKeyValueBytes(backupOut);
  backupStore.updateCounters(neededBytes);
}
项目:hadoop    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:aliyun-oss-hadoop-fs    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:big-c    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:hadoop-plus    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:FlexMap    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:hops    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:hadoop-TCP    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:hardfs    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}
项目:hadoop-on-lustre2    文件:ReduceContextImpl.java   
/**
 * Returns the backup store used for mark/reset support, or {@code null}
 * if it has not been created yet (it is created lazily in {@code mark()}).
 */
BackupStore<KEYIN,VALUEIN> getBackupStore() {
  return backupStore;
}