Java Class org.apache.hadoop.fs.Path Code Examples

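The snippets below are collected from open-source projects that use org.apache.hadoop.fs.Path. As orientation, here is a minimal, self-contained sketch of the core Path API the examples rely on; the paths in it are illustrative placeholders, not taken from any snippet.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathBasics {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // A Path can be built from a string, or composed from a parent and a child.
    Path base = new Path("/user/demo");            // placeholder path
    Path file = new Path(base, "data/input.txt");  // => /user/demo/data/input.txt

    System.out.println(file.getName());    // "input.txt"
    System.out.println(file.getParent());  // "/user/demo/data"

    // A Path resolves to a concrete FileSystem through the configuration.
    FileSystem fs = file.getFileSystem(conf);
    System.out.println(fs.makeQualified(file)); // e.g. "file:/user/demo/data/input.txt"
  }
}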
Project: hadoop    File: TestJoinDatamerge.java
private static void checkOuterConsistency(Job job, Path[] src) 
    throws IOException {
  Path outf = FileOutputFormat.getOutputPath(job);
  FileStatus[] outlist = cluster.getFileSystem().listStatus(outf, new 
                           Utils.OutputFileUtils.OutputFilesFilter());
  assertEquals("number of part files is more than 1. It is" + outlist.length,
    1, outlist.length);
  assertTrue("output file with zero length" + outlist[0].getLen(),
    0 < outlist[0].getLen());
  SequenceFile.Reader r =
    new SequenceFile.Reader(cluster.getFileSystem(),
        outlist[0].getPath(), job.getConfiguration());
  IntWritable k = new IntWritable();
  IntWritable v = new IntWritable();
  while (r.next(k, v)) {
    assertEquals("counts does not match", v.get(),
      countProduct(k, src, job.getConfiguration()));
  }
  r.close();
}
Project: hadoop    File: FTPFileSystem.java
/**
 * Convenience method, so that we don't open a new connection when using this
 * method from within another method. Otherwise every API invocation incurs
 * the overhead of opening/closing a TCP connection.
 */
private boolean mkdirs(FTPClient client, Path file, FsPermission permission)
    throws IOException {
  boolean created = true;
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  String pathName = absolute.getName();
  if (!exists(client, absolute)) {
    Path parent = absolute.getParent();
    created = (parent == null || mkdirs(client, parent, FsPermission
        .getDirDefault()));
    if (created && parent != null) {
      // parent == null means 'absolute' is the root; guard against the NPE below
      String parentDir = parent.toUri().getPath();
      client.changeWorkingDirectory(parentDir);
      created = created && client.makeDirectory(pathName);
    }
  } else if (isFile(client, absolute)) {
    throw new ParentNotDirectoryException(String.format(
        "Can't make directory for path %s since it is a file.", absolute));
  }
  return created;
}
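For comparison, callers normally reach this recursive helper through the public FileSystem API; a minimal sketch, in which the FTP URI and credentials are placeholder assumptions:

private boolean mkdirsViaPublicApi(Configuration conf) throws IOException {
  // Placeholder FTP URI and credentials, not taken from the snippet above.
  FileSystem ftpFs = FileSystem.get(URI.create("ftp://user:secret@ftp.example.com"), conf);
  return ftpFs.mkdirs(new Path("/a/b/c"), FsPermission.getDirDefault());
}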
Project: oryx2    File: KMeansUpdate.java
/**
 * @param sparkContext    active Spark Context
 * @param trainData       training data on which to build a model
 * @param hyperParameters ordered list of hyper parameter values to use in building model
 * @param candidatePath   directory where additional model files can be written
 * @return a {@link PMML} representation of a model trained on the given data
 */
@Override
public PMML buildModel(JavaSparkContext sparkContext,
                       JavaRDD<String> trainData,
                       List<?> hyperParameters,
                       Path candidatePath) {
  int numClusters = (Integer) hyperParameters.get(0);
  Preconditions.checkArgument(numClusters > 1);
  log.info("Building KMeans Model with {} clusters", numClusters);

  JavaRDD<Vector> trainingData = parsedToVectorRDD(trainData.map(MLFunctions.PARSE_FN));
  KMeansModel kMeansModel = KMeans.train(trainingData.rdd(), numClusters, maxIterations,
                                         numberOfRuns, initializationStrategy);

  return kMeansModelToPMML(kMeansModel, fetchClusterCountsFromModel(trainingData, kMeansModel));
}
Project: ditb    File: DataBlockEncodingTool.java
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit Maximum number of KeyValues to analyze.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When hfilePath is invalid.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.NONE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();

  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
Project: hadoop    File: TestNameNodeMetrics.java
/**
 * Test NN ReadOps Count and WriteOps Count
 */
@Test
public void testReadWriteOps() throws Exception {
  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  long startWriteCounter = MetricsAsserts.getLongCounter("TransactionsNumOps",
      rb);
  Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");

  //Perform create file operation
  createFile(file1_Path, 1024 * 1024,(short)2);

  // Perform read file operation on earlier created file
  readFile(fs, file1_Path);
  MetricsRecordBuilder rbNew = getMetrics(NN_METRICS);
  assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", rbNew) >
      startWriteCounter);
}
Project: hadoop-oss    File: TestNativeIO.java
@Test (timeout = 30000)
public void testFstat() throws Exception {
  FileOutputStream fos = new FileOutputStream(
    new File(TEST_DIR, "testfstat"));
  NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
  fos.close();
  LOG.info("Stat: " + String.valueOf(stat));

  String owner = stat.getOwner();
  String expectedOwner = System.getProperty("user.name");
  if (Path.WINDOWS) {
    UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser(expectedOwner);
    final String adminsGroupString = "Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner = adminsGroupString;
    }
  }
  assertEquals(expectedOwner, owner);
  assertNotNull(stat.getGroup());
  assertTrue(!stat.getGroup().isEmpty());
  assertEquals("Stat mode field should indicate a regular file", S_IFREG,
    stat.getMode() & S_IFMT);
}
Project: hadoop-oss    File: TestCodec.java
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {

  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
    clazz.getSimpleName() + "-" + type + "-" + records);

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
  reader.close(); // close the reader; the original snippet leaked it
}
Project: hadoop    File: BlockReportTestBase.java
/**
 * The test writes a file and closes it. A block report is then generated
 * with a bad generation stamp (GS) for a single block. The report is
 * force-sent and the number of corrupt blocks is checked.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_03() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  writeFile(METHOD_NAME, FILE_SIZE, filePath);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
Project: hadoop    File: TestConfiguration.java
public void testBooleanValues() throws IOException {
  out=new BufferedWriter(new FileWriter(CONFIG));
  startConfig();
  appendProperty("test.bool1", "true");
  appendProperty("test.bool2", "false");
  appendProperty("test.bool3", "  true ");
  appendProperty("test.bool4", " false ");
  appendProperty("test.bool5", "foo");
  appendProperty("test.bool6", "TRUE");
  appendProperty("test.bool7", "FALSE");
  appendProperty("test.bool8", "");
  endConfig();
  Path fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  assertEquals(true, conf.getBoolean("test.bool1", false));
  assertEquals(false, conf.getBoolean("test.bool2", true));
  assertEquals(true, conf.getBoolean("test.bool3", false));
  assertEquals(false, conf.getBoolean("test.bool4", true));
  assertEquals(true, conf.getBoolean("test.bool5", true));
  assertEquals(true, conf.getBoolean("test.bool6", false));
  assertEquals(false, conf.getBoolean("test.bool7", true));
  assertEquals(false, conf.getBoolean("test.bool8", false));
}
Project: hadoop-oss    File: TestFileStatus.java
@Test
public void testCompareTo() throws IOException {
  Path path1 = new Path("path1");
  Path path2 = new Path("path2");
  FileStatus fileStatus1 =
      new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
          "one", "one", null, path1);
  FileStatus fileStatus2 =
      new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
          "one", "one", null, path2);
  assertTrue(fileStatus1.compareTo(fileStatus2) < 0);
  assertTrue(fileStatus2.compareTo(fileStatus1) > 0);

  List<FileStatus> statList = new ArrayList<>();
  statList.add(fileStatus1);
  statList.add(fileStatus2);
  assertTrue(Collections.binarySearch(statList, fileStatus1) > -1);
}
Project: hadoop    File: FileInputFormat.java
/**
 * Recursively add all files under the input path to the result list.
 * @param result
 *          The List to store all files.
 * @param fs
 *          The FileSystem.
 * @param path
 *          The input path.
 * @param inputFilter
 *          The input filter that can be used to filter files/dirs. 
 * @throws IOException
 */
protected void addInputPathRecursively(List<FileStatus> result,
    FileSystem fs, Path path, PathFilter inputFilter) 
    throws IOException {
  RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
  while (iter.hasNext()) {
    LocatedFileStatus stat = iter.next();
    if (inputFilter.accept(stat.getPath())) {
      if (stat.isDirectory()) {
        addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
      } else {
        result.add(stat);
      }
    }
  }
}
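Since Hadoop 2, FileSystem#listFiles(path, true) performs the same recursive walk without hand-written recursion. A minimal sketch, assuming the same fs, path, inputFilter, and result as above; note one behavioral difference: the filter here is applied only to files, whereas the method above can also prune whole directories.

RemoteIterator<LocatedFileStatus> files = fs.listFiles(path, true);
while (files.hasNext()) {
  LocatedFileStatus stat = files.next();
  // Only files are returned; directories are traversed internally.
  if (inputFilter.accept(stat.getPath())) {
    result.add(stat);
  }
}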
Project: hadoop    File: TestSharedFileDescriptorFactory.java
@Test(timeout=10000)
public void testCleanupRemainders() throws Exception {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  File path = new File(TEST_BASE, "testCleanupRemainders");
  path.mkdirs();
  String remainder1 = path.getAbsolutePath() + 
      Path.SEPARATOR + "woot2_remainder1";
  String remainder2 = path.getAbsolutePath() +
      Path.SEPARATOR + "woot2_remainder2";
  createTempFile(remainder1);
  createTempFile(remainder2);
  SharedFileDescriptorFactory.create("woot2_", 
      new String[] { path.getAbsolutePath() });
  // creating the SharedFileDescriptorFactory should have removed 
  // the remainders
  Assert.assertFalse(new File(remainder1).exists());
  Assert.assertFalse(new File(remainder2).exists());
  FileUtil.fullyDelete(path);
}
Project: big-data-benchmark    File: HadoopWordCount.java
public static void main(String[] args) throws Exception {
    BasicConfigurator.configure();
    Configuration conf = new Configuration();
    conf.setQuietMode(true);

    Job job = Job.getInstance(conf, "WordCount");
    job.setJarByClass(HadoopWordCount.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(Map.class);
    job.setCombinerClass(Reduce.class);
    job.setReducerClass(Reduce.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1] + "_" + System.currentTimeMillis()));

    long t = System.currentTimeMillis();
    job.waitForCompletion(true);

    System.out.println("TotalTime=" + (System.currentTimeMillis() - t));
}
Project: ditb    File: FSTableDescriptors.java
@Override
public Map<String, HTableDescriptor> getByNamespace(String name)
throws IOException {
  Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
  List<Path> tableDirs =
      FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
  for (Path d: tableDirs) {
    HTableDescriptor htd = null;
    try {
      htd = get(FSUtils.getTableName(d));
    } catch (FileNotFoundException fnfe) {
      // failure to retrieve one HTD shouldn't stop us from fetching the remaining ones
      LOG.warn("Trouble retrieving htd", fnfe);
    }
    if (htd == null) continue;
    htds.put(FSUtils.getTableName(d).getNameAsString(), htd);
  }
  return htds;
}
Project: hadoop    File: RandomTextWriterJob.java
/**
 * This is the main routine for launching a distributed random write job.
 * It runs 10 maps/node and each node writes 1 gig of data to a DFS file.
 * The reduce doesn't do anything.
 * 
 * @throws IOException 
 */
public int run(String[] args) throws Exception {    
  if (args.length == 0) {
    return printUsage();    
  }
  Job job = createJob(getConf());
  FileOutputFormat.setOutputPath(job, new Path(args[0]));
  Date startTime = new Date();
  System.out.println("Job started: " + startTime);
  int ret = job.waitForCompletion(true) ? 0 : 1;
  Date endTime = new Date();
  System.out.println("Job ended: " + endTime);
  System.out.println("The job took " + 
                     (endTime.getTime() - startTime.getTime()) /1000 + 
                     " seconds.");

  return ret;
}
Project: hadoop-oss    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: Wikipedia-Index    File: MaxThreeLabel.java
public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    Job job =Job.getInstance(conf);
    job.setJobName("MaxThreeLabel");
    job.setJarByClass(MaxThreeLabel.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(TextArrayWritable.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    job.setMapperClass(MaxThreeLabelMap.class);
    job.setReducerClass(MaxThreeLabelReduce.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    boolean wait = job.waitForCompletion(true);
    System.exit(wait ? 0 : 1);
}
Project: oryx2    File: SaveToHDFSFunction.java
@Override
public void call(JavaPairRDD<K,M> rdd, Time time) throws IOException {
  if (rdd.isEmpty()) {
    log.info("RDD was empty, not saving to HDFS");
  } else {
    String file = prefix + "-" + time.milliseconds() + "." + suffix;
    Path path = new Path(file);
    FileSystem fs = FileSystem.get(path.toUri(), hadoopConf);
    if (fs.exists(path)) {
      log.warn("Saved data already existed, possibly from a failed job. Deleting {}", path);
      fs.delete(path, true);
    }
    log.info("Saving RDD to HDFS at {}", file);
    rdd.mapToPair(
        new ValueToWritableFunction<>(keyClass, messageClass, keyWritableClass, messageWritableClass)
    ).saveAsNewAPIHadoopFile(
        file,
        keyWritableClass,
        messageWritableClass,
        SequenceFileOutputFormat.class,
        hadoopConf);
  }
}
Project: Transwarp-Sample-Code    File: AvroEventSerializer.java
private Schema loadFromUrl(String schemaUrl) throws IOException {
  Configuration conf = new Configuration();
  Schema.Parser parser = new Schema.Parser();
  if (schemaUrl.toLowerCase(Locale.ENGLISH).startsWith("hdfs://")) {
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream input = null;
    try {
      input = fs.open(new Path(schemaUrl));
      return parser.parse(input);
    } finally {
      if (input != null) {
        input.close();
      }
    }
  } else {
    InputStream is = null;
    try {
      is = new URL(schemaUrl).openStream();
      return parser.parse(is);
    } finally {
      if (is != null) {
        is.close();
      }
    }
  }
}
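On Java 7 and later, the two try/finally blocks collapse into try-with-resources; an equivalent sketch of the same method:

private Schema loadFromUrl(String schemaUrl) throws IOException {
  Schema.Parser parser = new Schema.Parser();
  if (schemaUrl.toLowerCase(Locale.ENGLISH).startsWith("hdfs://")) {
    FileSystem fs = FileSystem.get(new Configuration());
    // FSDataInputStream is Closeable, so it is closed automatically.
    try (FSDataInputStream input = fs.open(new Path(schemaUrl))) {
      return parser.parse(input);
    }
  }
  try (InputStream is = new URL(schemaUrl).openStream()) {
    return parser.parse(is);
  }
}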
Project: ditb    File: TestImportTsv.java
@Test
public void testBulkOutputWithoutAnExistingTable() throws Exception {
  String table = "test-" + UUID.randomUUID();

  // Prepare the arguments required for the test.
  Path hfiles = new Path(util.getDataTestDirOnTestFS(table), "hfiles");
  String[] args = new String[] {
      "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
      "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(),
      table
  };

  doMROnTableTest(util, FAMILY, null, args, 3);
  util.deleteTable(table);
}
Project: ditb    File: AbstractHFileWriter.java
public AbstractHFileWriter(CacheConfig cacheConf,
    FSDataOutputStream outputStream, Path path, 
    KVComparator comparator, HFileContext fileContext) {
  this.outputStream = outputStream;
  this.path = path;
  this.name = path != null ? path.getName() : outputStream.toString();
  this.hFileContext = fileContext;
  DataBlockEncoding encoding = hFileContext.getDataBlockEncoding();
  if (encoding != DataBlockEncoding.NONE) {
    this.blockEncoder = new HFileDataBlockEncoderImpl(encoding);
  } else {
    this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
  }
  this.comparator = comparator != null ? comparator
      : KeyValue.COMPARATOR;

  closeOutputStream = path != null;
  this.cacheConf = cacheConf;
}
Project: hadoop    File: TestDiskspaceQuotaUpdate.java
/**
 * Test if the quota can be correctly updated for create file
 */
@Test (timeout=60000)
public void testQuotaUpdateWithFileCreate() throws Exception  {
  final Path foo = new Path(dir, "foo");
  Path createdFile = new Path(foo, "created_file.data");
  dfs.mkdirs(foo);
  dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
      fileLen, BLOCKSIZE, REPLICATION, seed);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(2, cnt.getNameSpace());
  assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
}
Project: hadoop    File: Gridmix.java
/**
 * Setup gridmix for emulation of distributed cache load. This includes
 * generation of distributed cache files, if needed.
 * @param conf gridmix configuration
 * @param traceIn trace file path (if it is '-', the trace is read from
 *                stdin)
 * @param ioPath &lt;ioPath&gt;/input/ is the dir where input data (a) exists
 *               or (b) is generated. &lt;ioPath&gt;/distributedCache/ is the
 *               folder where distributed cache data (a) exists or (b) is to be
 *               generated by gridmix.
 * @param generate true if -generate option was specified
 * @return exit code
 * @throws IOException
 * @throws InterruptedException
 */
private int setupDistCacheEmulation(Configuration conf, String traceIn,
    Path ioPath, boolean generate) throws IOException, InterruptedException {
  distCacheEmulator.init(traceIn, factory.jobCreator, generate);
  int exitCode = 0;
  if (distCacheEmulator.shouldGenerateDistCacheData() ||
      distCacheEmulator.shouldEmulateDistCacheLoad()) {

    JobStoryProducer jsp = createJobStoryProducer(traceIn, conf);
    exitCode = distCacheEmulator.setupGenerateDistCacheData(jsp);
    if (exitCode == 0) {
      // If there are files to be generated, run a MapReduce job to generate
      // these distributed cache files of all the simulated jobs of this trace.
      writeDistCacheData(conf);
    }
  }
  return exitCode;
}
Project: paraflow    File: FSFactory.java
public List<Path> listFiles(Path dirPath)
{
    List<Path> files = new ArrayList<>();
    if (!getFS().isPresent()) {
        throw new FileSystemNotFoundException("");
    }
    FileStatus[] fileStatuses = new FileStatus[0];
    try {
        fileStatuses = getFS().get().listStatus(dirPath);
    }
    catch (IOException e) {
        log.error(e);
    }
    for (FileStatus f : fileStatuses) {
        if (f.isFile()) {
            files.add(f.getPath());
        }
    }
    return files;
}
Project: hadoop    File: AbstractContractCreateTest.java
@Test
public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
  describe("Verify overwriting an existing file fails");
  Path path = path("testCreateFileOverExistingFileNoOverwrite");
  byte[] data = dataset(256, 'a', 'z');
  writeDataset(getFileSystem(), path, data, data.length, 1024, false);
  byte[] data2 = dataset(10 * 1024, 'A', 'Z');
  try {
    writeDataset(getFileSystem(), path, data2, data2.length, 1024, false);
    fail("writing without overwrite unexpectedly succeeded");
  } catch (FileAlreadyExistsException expected) {
    //expected
    handleExpectedException(expected);
  } catch (IOException relaxed) {
    handleRelaxedException("Creating a file over a file with overwrite==false",
                           "FileAlreadyExistsException",
                           relaxed);
  }
}
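The contract being exercised reduces to FileSystem#create with overwrite=false; a minimal sketch, assuming the same getFileSystem(), path, and data2 as in the test:

// Expected to throw FileAlreadyExistsException (or, on relaxed filesystems,
// a plain IOException) because 'path' exists and overwrite is false.
try (FSDataOutputStream out = getFileSystem().create(path, false)) {
  out.write(data2);
}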
Project: hadoop    File: TestSeekBug.java
/**
* Test (expected to throw IOException) for <code>FSDataInputStream#seek</code>
* when the position argument is larger than the file size.
*/
@Test (expected=IOException.class)
public void testSeekPastFileSize() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path seekFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(
      fs,
      seekFile,
      ONEMB,
      fs.getDefaultReplication(seekFile),
      seed);
    FSDataInputStream stream = fs.open(seekFile);
    // Perform "safe seek" (expected to pass)
    stream.seek(65536);
    assertEquals(65536, stream.getPos());
    // expect IOE for this call
    stream.seek(ONEMB + ONEMB + ONEMB);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Project: hadoop    File: Merger.java
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                          Class<K> keyClass, Class<V> valueClass,
                          List<Segment<K, V>> segments,
                          int mergeFactor, Path tmpDir,
                          RawComparator<K> comparator, Progressable reporter,
                          boolean sortSegments,
                          Counters.Counter readsCounter,
                          Counters.Counter writesCounter,
                          Progress mergePhase)
    throws IOException {
  return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
                         sortSegments,
                         TaskType.REDUCE).merge(keyClass, valueClass,
                                             mergeFactor, tmpDir,
                                             readsCounter, writesCounter,
                                             mergePhase);
}
Project: hadoop    File: HistoryFileManager.java
private void mkdir(FileContext fc, Path path, FsPermission fsp)
    throws IOException {
  if (!fc.util().exists(path)) {
    try {
      fc.mkdir(path, fsp, true);

      FileStatus fsStatus = fc.getFileStatus(path);
      LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
          + ", Expected: " + fsp.toShort());
      if (fsStatus.getPermission().toShort() != fsp.toShort()) {
        LOG.info("Explicitly setting permissions to : " + fsp.toShort()
            + ", " + fsp);
        fc.setPermission(path, fsp);
      }
    } catch (FileAlreadyExistsException e) {
      LOG.info("Directory: [" + path + "] already exists.");
    }
  }
}
Project: hadoop    File: TestFSDownload.java
static LocalResource createJarFile(FileContext files, Path p, int len,
    Random r, LocalResourceVisibility vis) throws IOException,
    URISyntaxException {
  byte[] bytes = new byte[len];
  r.nextBytes(bytes);

  File archiveFile = new File(p.toUri().getPath() + ".jar");
  archiveFile.createNewFile();
  JarOutputStream out = new JarOutputStream(
      new FileOutputStream(archiveFile));
  out.putNextEntry(new JarEntry(p.getName()));
  out.write(bytes);
  out.closeEntry();
  out.close();

  LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
  ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(p.toString()
      + ".jar")));
  ret.setSize(len);
  ret.setType(LocalResourceType.ARCHIVE);
  ret.setVisibility(vis);
  ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".jar"))
      .getModificationTime());
  return ret;
}
Project: hadoop    File: TestWebHdfsWithAuthenticationFilter.java
@Test
public void testWebHdfsAuthFilter() throws IOException {
  // getFileStatus() is supposed to pass through with the default filter.
  authorized = false;
  try {
    fs.getFileStatus(new Path("/"));
    Assert.fail("The filter fails to block the request");
  } catch (IOException e) {
  }
  authorized = true;
  fs.getFileStatus(new Path("/"));
}
Project: hadoop    File: DFSTestUtil.java
public void waitReplication(FileSystem fs, String topdir, short value) 
    throws IOException, InterruptedException, TimeoutException {
  Path root = new Path(topdir);

  /** wait for the replication factor to settle down */
  for (int idx = 0; idx < nFiles; idx++) {
    waitReplication(fs, new Path(root, files[idx].getName()), value);
  }
}
Project: hadoop    File: TestSequenceFileSync.java
@Test
public void testLowSyncpoint() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem fs = FileSystem.getLocal(conf);
  final Path path = new Path(System.getProperty("test.build.data", "/tmp"),
    "sequencefile.sync.test");
  final IntWritable input = new IntWritable();
  final Text val = new Text();
  SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, path,
      IntWritable.class, Text.class);
  try {
    writeSequenceFile(writer, NUMRECORDS);
    for (int i = 0; i < 5 ; i++) {
     final SequenceFile.Reader reader;

     //try different SequenceFile.Reader constructors
     if (i % 2 == 0) {
       reader = new SequenceFile.Reader(fs, path, conf);
     } else {
       final FSDataInputStream in = fs.open(path);
       final long length = fs.getFileStatus(path).getLen();
       final int buffersize = conf.getInt("io.file.buffer.size", 4096);
       reader = new SequenceFile.Reader(in, buffersize, 0L, length, conf);
     }

     try {
        forOffset(reader, input, val, i, 0, 0);
        forOffset(reader, input, val, i, 65, 0);
        forOffset(reader, input, val, i, 2000, 21);
        forOffset(reader, input, val, i, 0, 0);
      } finally {
        reader.close();
      }
    }
  } finally {
    fs.delete(path, false);
  }
}
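A third constructor style, the Options-based reader added in Hadoop 2, could be exercised the same way; a minimal sketch using the same conf and path (close it as above when done):

final SequenceFile.Reader reader =
    new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));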
Project: ditb    File: HRegionFileSystem.java
/**
 * Remove the region family from disk, archiving the store files.
 *
 * @param familyName Column Family Name
 * @throws IOException if an error occurs during the archiving
 */
public void deleteFamily(final String familyName) throws IOException {
  // archive family store files
  HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));

  // delete the family folder
  Path familyDir = getStoreDir(familyName);
  if (fs.exists(familyDir) && !deleteDir(familyDir)) throw new IOException(
      "Could not delete family " + familyName + " from FileSystem for region " + regionInfoForFs
          .getRegionNameAsString() + "(" + regionInfoForFs.getEncodedName() + ")");
}
Project: hadoop    File: ViewFileSystem.java
@Override
public short getDefaultReplication(Path f) {
  try {
    InodeTree.ResolveResult<FileSystem> res =
      fsState.resolve(getUriPath(f), true);
    return res.targetFileSystem.getDefaultReplication(res.remainingPath);
  } catch (FileNotFoundException e) {
    throw new NotInMountpointException(f, "getDefaultReplication"); 
  }
}
Project: hadoop    File: TestDynamicInputFormat.java
private static DistCpOptions getOptions() throws Exception {
  Path sourcePath = new Path(cluster.getFileSystem().getUri().toString()
          + "/tmp/source");
  Path targetPath = new Path(cluster.getFileSystem().getUri().toString()
          + "/tmp/target");

  List<Path> sourceList = new ArrayList<Path>();
  sourceList.add(sourcePath);
  DistCpOptions options = new DistCpOptions(sourceList, targetPath);
  options.setMaxMaps(NUM_SPLITS);
  return options;
}
Project: LDA    File: InitMapper.java
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    Configuration conf = context.getConfiguration();
    this.K = Integer.parseInt(conf.get("K"));
    ReadIndexFromFile(new Path(conf.get(Job.indexFile)), conf);
    docToTopic = new Matrix<Integer>(M, K, 0);
    topicToWord = new Matrix<Integer>(K, V, 0);
}
Project: hadoop-oss    File: ChRootedFs.java
@Override
public void createSymlink(final Path target, final Path link,
    final boolean createParent) throws IOException, UnresolvedLinkException {
  /*
   * We leave the link alone:
   * If qualified or link relative then of course it is okay.
   * If absolute (ie / relative) then the link has to be resolved
   * relative to the changed root.
   */
  myFs.createSymlink(fullPath(target), link, createParent);
}
Project: hadoop    File: DistTool.java
protected static List<String> readFile(Configuration conf, Path inputfile
    ) throws IOException {
  List<String> result = new ArrayList<String>();
  FileSystem fs = inputfile.getFileSystem(conf);
  try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.open(inputfile),
          Charset.forName("UTF-8")))) {
    for(String line; (line = input.readLine()) != null;) {
      result.add(line);
    }
  }
  return result;
}
Project: hadoop-oss    File: LocalDirAllocator.java
/**
 * Get all of the paths that currently exist in the working directories.
 * @param pathStr the path underneath the roots
 * @param conf the configuration to look up the roots in
 * @return all of the paths that exist under any of the roots
 * @throws IOException
 */
public Iterable<Path> getAllLocalPathsToRead(String pathStr, 
                                             Configuration conf
                                             ) throws IOException {
  AllocatorPerContext context;
  synchronized (this) {
    context = obtainContext(contextCfgItemName);
  }
  return context.getAllLocalPathsToRead(pathStr, conf);    
}
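A typical call site looks like the following sketch; the configuration key and relative path are illustrative assumptions, not taken from the snippet:

LocalDirAllocator allocator = new LocalDirAllocator("mapreduce.cluster.local.dir");
for (Path p : allocator.getAllLocalPathsToRead("output/spill0.out", conf)) {
  System.out.println("readable copy at " + p);
}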
Project: QDrill    File: TestParquetMetadataCache.java
@BeforeClass
public static void copyData() throws Exception {
  // copy the data into the temporary location
  String tmpLocation = getDfsTestTmpSchemaLocation();
  File dataDir = new File(tmpLocation + Path.SEPARATOR + tableName);
  dataDir.mkdir();
  FileUtils.copyDirectory(new File(String.format("%s/multilevel/parquet", TEST_RES_PATH)),
      dataDir);
}