Example source code for the Java class org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor
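The test snippets below, collected from several HBase forks (ditb, pbase, HIndex, hbase, PyroDB), all share one pattern: fetch the store's compactor from its store engine, cast it to DefaultCompactor, and call compactForTesting. A minimal sketch of that shared pattern, assuming an already-initialized HStore named store as in the snippets (in the newest hbase fork the store-file type is HStoreFile rather than StoreFile); this is an illustrative fragment, not a standalone runnable test:

// Sketch of the pattern the snippets below share; `store` is assumed to be an
// initialized HStore that already has store files on disk.
Collection<StoreFile> storeFiles = store.getStorefiles();
DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
// The boolean selects a major (true) or minor (false) compaction; the result
// is the list of newly written compacted files.
List<Path> newFiles = tool.compactForTesting(storeFiles, false);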

Project: ditb    File: TestCompaction.java
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);

  Collection<StoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);

  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short) 3, (long) 1024, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath =
      store.getRegionFileSystem().commitStoreFile(Bytes.toString(COLUMN_FAMILY), dstPath);

  try {
    ((HStore) store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail on the corrupt file: it remains
    // at its committed path, and the temporary name no longer exists.
    assert (fs.exists(origPath));
    assert (!fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was"
      + "thrown while completing a corrupt file");
}
Project: pbase    File: TestCompaction.java
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);

  Collection<StoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor)store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);

  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, (long)1024, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath = store.getRegionFileSystem().commitStoreFile(
    Bytes.toString(COLUMN_FAMILY), dstPath);

  try {
    ((HStore)store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail on the corrupt file: it remains
    // at its committed path, and the temporary name no longer exists.
    assert (fs.exists(origPath));
    assert (!fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was" +
      "thrown while completing a corrupt file");
}
Project: HIndex    File: TestCompaction.java
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);

  Collection<StoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor)store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);

  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, (long)1024, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath = store.getRegionFileSystem().commitStoreFile(
    Bytes.toString(COLUMN_FAMILY), dstPath);

  try {
    ((HStore)store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail on the corrupt file: it remains
    // at its committed path, and the temporary name no longer exists.
    assert (fs.exists(origPath));
    assert (!fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was " +
      "thrown while completing a corrupt file");
}
Project: hbase    File: TestCompaction.java
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = r.getStore(COLUMN_FAMILY);

  Collection<HStoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor)store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);

  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, 1024L, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath = store.getRegionFileSystem().commitStoreFile(
    Bytes.toString(COLUMN_FAMILY), dstPath);

  try {
    ((HStore)store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail on the corrupt file: it remains
    // at its committed path, and the temporary name no longer exists.
    assertTrue(fs.exists(origPath));
    assertFalse(fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was " +
      "thrown while completing a corrupt file");
}
Project: PyroDB    File: TestCompaction.java
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);

  Collection<StoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor)store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);

  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, (long)1024, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath = store.getRegionFileSystem().commitStoreFile(
    Bytes.toString(COLUMN_FAMILY), dstPath);

  try {
    ((HStore)store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail on the corrupt file: it remains
    // at its committed path, and the temporary name no longer exists.
    assert (fs.exists(origPath));
    assert (!fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was " +
      "thrown while completing a corrupt file");
}
Project: ditb    File: HStore.java
/**
 * This method tries to compact N recent files for testing. Note that because compacting "recent"
 * files only makes sense for some policies, e.g. the default one, it assumes the default policy
 * is used. It does not consult the policy, but instead builds the compaction candidate list by
 * itself.
 *
 * @param N Number of files.
 */
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<StoreFile> filesToCompact;
  boolean isMajor;

  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }

      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
    }
  } finally {
    this.lock.readLock().unlock();
  }

  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor) this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile : newFiles) {
      // Move the compaction into place.
      StoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null);
      }
      replaceStoreFiles(filesToCompact, Lists.newArrayList(sf));
      completeCompaction(filesToCompact, true);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}
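A hypothetical call site for this helper, reusing the fixtures from the TestCompaction snippets above (r, COLUMN_FAMILY, and createStoreFile come from those tests; the counts 3 and 2 are illustrative):

// Hypothetical usage, mirroring the test setup in the snippets above.
for (int i = 0; i < 3; i++) {
  createStoreFile(r);  // each call creates one new store file (per the test helper)
}
HStore store = (HStore) r.getStore(COLUMN_FAMILY);
// Compact only the two most recent store files; the method throws
// RuntimeException("Not enough files") if fewer than N files exist.
store.compactRecentForTestingAssumingDefaultPolicy(2);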
Project: pbase    File: HStore.java
/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes the default policy is used. It does not consult
 * the policy, but instead builds the compaction candidate list by itself.
 *
 * @param N Number of files.
 */
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
    List<StoreFile> filesToCompact;
    boolean isMajor;

    this.lock.readLock().lock();
    try {
        synchronized (filesCompacting) {
            filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
            if (!filesCompacting.isEmpty()) {
                // exclude all files older than the newest file we're currently
                // compacting. this allows us to preserve contiguity (HBASE-2856)
                StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
                int idx = filesToCompact.indexOf(last);
                Preconditions.checkArgument(idx != -1);
                filesToCompact.subList(0, idx + 1).clear();
            }
            int count = filesToCompact.size();
            if (N > count) {
                throw new RuntimeException("Not enough files");
            }

            filesToCompact = filesToCompact.subList(count - N, count);
            isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
            filesCompacting.addAll(filesToCompact);
            Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
        }
    } finally {
        this.lock.readLock().unlock();
    }

    try {
        // Ready to go. Have list of files to compact.
        List<Path> newFiles = ((DefaultCompactor) this.storeEngine.getCompactor())
                .compactForTesting(filesToCompact, isMajor);
        for (Path newFile : newFiles) {
            // Move the compaction into place.
            StoreFile sf = moveFileIntoPlace(newFile);
            if (this.getCoprocessorHost() != null) {
                this.getCoprocessorHost().postCompact(this, sf, null);
            }
            replaceStoreFiles(filesToCompact, Lists.newArrayList(sf));
            completeCompaction(filesToCompact, true);
        }
    } finally {
        synchronized (filesCompacting) {
            filesCompacting.removeAll(filesToCompact);
        }
    }
}
Project: HIndex    File: HStore.java
/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes the default policy is used. It does not consult
 * the policy, but instead builds the compaction candidate list by itself.
 * @param N Number of files.
 */
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<StoreFile> filesToCompact;
  boolean isMajor;

  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }

      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
    }
  } finally {
    this.lock.readLock().unlock();
  }

  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor)this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile: newFiles) {
      // Move the compaction into place.
      StoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null);
      }
      replaceStoreFiles(filesToCompact, Lists.newArrayList(sf));
      completeCompaction(filesToCompact);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}
Project: hbase    File: HStore.java
/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes the default policy is used. It does not consult
 * the policy, but instead builds the compaction candidate list by itself.
 * @param N Number of files.
 */
@VisibleForTesting
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<HStoreFile> filesToCompact;
  boolean isMajor;

  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        HStoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }

      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, storeEngine.getStoreFileManager()
          .getStoreFileComparator());
    }
  } finally {
    this.lock.readLock().unlock();
  }

  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor)this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile: newFiles) {
      // Move the compaction into place.
      HStoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null, null, null);
      }
      replaceStoreFiles(filesToCompact, Collections.singletonList(sf));
      completeCompaction(filesToCompact);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}
Project: PyroDB    File: HStore.java
/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes the default policy is used. It does not consult
 * the policy, but instead builds the compaction candidate list by itself.
 * @param N Number of files.
 */
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<StoreFile> filesToCompact;
  boolean isMajor;

  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }

      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
    }
  } finally {
    this.lock.readLock().unlock();
  }

  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor)this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile: newFiles) {
      // Move the compaction into place.
      StoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null);
      }
      replaceStoreFiles(filesToCompact, Lists.newArrayList(sf));
      completeCompaction(filesToCompact);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}