Java class org.apache.lucene.index.SerialMergeScheduler code examples
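SerialMergeScheduler runs segment merges sequentially on the thread that triggers them, unlike Lucene's default ConcurrentMergeScheduler, which runs merges in background threads. It is typically chosen where background threads are unavailable (for example, Google App Engine) or where deterministic merge behavior is wanted (for example, reproducible tests). The examples below are collected from several open-source projects; a sketch of the modern configuration API follows the first, legacy-API example.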

Project: community-edition-old    File: IndexInfo.java
/**
 * Make a Lucene index writer.
 * 
 * @param location the index directory location on disk
 * @param analyzer the analyzer to use for indexed text
 * @return a configured IndexWriter
 * @throws IOException if the index cannot be opened or created
 */
private IndexWriter makeDeltaIndexWriter(File location, Analyzer analyzer) throws IOException
{
    IndexWriter writer;
    if (!IndexReader.indexExists(location))
    {
        writer = new IndexWriter(location, analyzer, true, MaxFieldLength.LIMITED);
    }
    else
    {
        writer = new IndexWriter(location, analyzer, false, MaxFieldLength.LIMITED);
    }
    writer.setUseCompoundFile(writerUseCompoundFile);
    writer.setMaxBufferedDocs(writerMaxBufferedDocs);
    writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
    writer.setMergeFactor(writerMergeFactor);
    writer.setMaxMergeDocs(writerMaxMergeDocs);
    writer.setWriteLockTimeout(writeLockTimeout);
    writer.setMaxFieldLength(maxFieldLength);
    writer.setTermIndexInterval(termIndexInterval);
    writer.setMergeScheduler(new SerialMergeScheduler());
    writer.setMergePolicy(new LogDocMergePolicy());
    return writer;
}
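The snippet above targets the Lucene 2.x/3.x API, where the merge scheduler and merge policy are set directly on the IndexWriter. From Lucene 4.0 on, this configuration moved to IndexWriterConfig. A minimal sketch of the equivalent setup, with hypothetical buffer values and assuming a Lucene version whose IndexWriterConfig constructor takes only an Analyzer:

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.store.Directory;

public final class SerialWriterFactory {
    // Minimal sketch, not the project's code.
    static IndexWriter makeWriter(Directory dir, Analyzer analyzer) throws IOException {
        IndexWriterConfig config = new IndexWriterConfig(analyzer);
        // CREATE_OR_APPEND replaces the explicit indexExists() check above
        config.setOpenMode(OpenMode.CREATE_OR_APPEND);
        config.setMaxBufferedDocs(1000);  // hypothetical value
        config.setRAMBufferSizeMB(16.0);  // hypothetical value
        // run all merges synchronously on the calling thread
        config.setMergeScheduler(new SerialMergeScheduler());
        // LogDocMergePolicy selects merges by document count, as above
        config.setMergePolicy(new LogDocMergePolicy());
        return new IndexWriter(dir, config);
    }
}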
Project: opensearchserver    File: WriterLocal.java
private IndexWriter open(boolean create) throws IOException, SearchLibException {
    indexWriterLock.lock();
    final IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_36, null);
    config.setOpenMode(create ? OpenMode.CREATE_OR_APPEND : OpenMode.APPEND);
    config.setMergeScheduler(new SerialMergeScheduler());
    config.setWriteLockTimeout(indexConfig.getWriteLockTimeout());
    config.setRAMBufferSizeMB(128);
    final Similarity similarity = indexConfig.getNewSimilarityInstance();
    if (similarity != null)
        config.setSimilarity(similarity);
    if (!create) {
        final SnapshotDeletionPolicy snapshotDeletionPolicy =
                new SnapshotDeletionPolicy(config.getIndexDeletionPolicy());
        config.setIndexDeletionPolicy(snapshotDeletionPolicy);
    }
    Logging.debug("WriteLocal open " + indexDirectory.getDirectory());
    return new IndexWriter(indexDirectory.getDirectory(), config);
}
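With SerialMergeScheduler, any merges triggered by a commit run synchronously on the committing thread, so this writer never spawns merge threads of its own. Note that indexWriterLock is acquired here but not released in this method; presumably the matching unlock happens on the writer's close path elsewhere in the class.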
Project: search    File: TestEarlyTermination.java
private void createRandomIndex() throws IOException {
  dir = newDirectory();
  numDocs = atLeast(150);
  final int numTerms = TestUtil.nextInt(random(), 1, numDocs / 5);
  Set<String> randomTerms = new HashSet<>();
  while (randomTerms.size() < numTerms) {
    randomTerms.add(TestUtil.randomSimpleString(random()));
  }
  terms = new ArrayList<>(randomTerms);
  final long seed = random().nextLong();
  final IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(new Random(seed)));
  iwc.setMergeScheduler(new SerialMergeScheduler()); // for reproducible tests
  iwc.setMergePolicy(TestSortingMergePolicy.newSortingMergePolicy(sort));
  iw = new RandomIndexWriter(new Random(seed), dir, iwc);
  iw.setDoRandomForceMerge(false); // don't do this, it may happen anyway with MockRandomMP
  for (int i = 0; i < numDocs; ++i) {
    final Document doc = randomDocument();
    iw.addDocument(doc);
    if (i == numDocs / 2 || (i != numDocs - 1 && random().nextInt(8) == 0)) {
      iw.commit();
    }
    if (random().nextInt(15) == 0) {
      final String term = RandomPicks.randomFrom(random(), terms);
      iw.deleteDocuments(new Term("s", term));
    }
  }
  if (random().nextBoolean()) {
    iw.forceMerge(5);
  }
  reader = iw.getReader();
}
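Pinning the merge scheduler to SerialMergeScheduler is what the "for reproducible tests" comment refers to: with a fixed seed and merges running deterministically on the indexing thread, the resulting segment geometry is the same on every run.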
Project: luceneappengine    File: GaeLuceneUtil.java
/**
 * Returns an {@link IndexWriterConfig} properly configured to work in
 * the Google App Engine environment.
 * @param analyzer The analyzer to use
 * @return An {@link IndexWriterConfig} properly configured
 */
@SuppressWarnings("resource") // SerialMergeScheduler is Closeable
public static IndexWriterConfig getIndexWriterConfig(Analyzer analyzer) {
    final IndexWriterConfig config = new IndexWriterConfig(analyzer);
    config.setMergeScheduler(new SerialMergeScheduler());
    return config;
}
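App Engine's standard environment does not let applications spawn their own background threads, which rules out ConcurrentMergeScheduler; SerialMergeScheduler instead merges on the calling (request) thread. A hypothetical usage sketch, where GaeDirectory stands in for the project's App-Engine-backed Directory implementation (an assumption here, not shown in the excerpt):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;

// Hypothetical usage sketch, not part of the project excerpt.
Directory directory = new GaeDirectory();  // assumed GAE-backed Directory
IndexWriterConfig config = GaeLuceneUtil.getIndexWriterConfig(new StandardAnalyzer());
try (IndexWriter writer = new IndexWriter(directory, config)) {
    Document doc = new Document();
    doc.add(new TextField("body", "hello app engine", Field.Store.YES));
    writer.addDocument(doc);  // merges, if any, run on this thread
}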
Project: search    File: LuceneTestCase.java
/** create a new index writer config with random defaults using the specified random */
public static IndexWriterConfig newIndexWriterConfig(Random r, Version v, Analyzer a) {
  IndexWriterConfig c = new IndexWriterConfig(v, a);
  c.setSimilarity(classEnvRule.similarity);
  if (VERBOSE) {
    // Even though TestRuleSetupAndRestoreClassEnv calls
    // InfoStream.setDefault, we do it again here so that
    // the PrintStreamInfoStream.messageID increments so
    // that when there are separate instances of
    // IndexWriter created we see "IW 0", "IW 1", "IW 2",
    // ... instead of just always "IW 0":
    c.setInfoStream(new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream(System.out));
  }

  if (r.nextBoolean()) {
    c.setMergeScheduler(new SerialMergeScheduler());
  } else if (rarely(r)) {
    int maxThreadCount = TestUtil.nextInt(r, 1, 4);
    int maxMergeCount = TestUtil.nextInt(r, maxThreadCount, maxThreadCount+4);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
    c.setMergeScheduler(cms);
  }
  if (r.nextBoolean()) {
    if (rarely(r)) {
      // crazy value
      c.setMaxBufferedDocs(TestUtil.nextInt(r, 2, 15));
    } else {
      // reasonable value
      c.setMaxBufferedDocs(TestUtil.nextInt(r, 16, 1000));
    }
  }
  if (r.nextBoolean()) {
    if (rarely(r)) {
      // crazy value
      c.setTermIndexInterval(r.nextBoolean() ? TestUtil.nextInt(r, 1, 31) : TestUtil.nextInt(r, 129, 1000));
    } else {
      // reasonable value
      c.setTermIndexInterval(TestUtil.nextInt(r, 32, 128));
    }
  }
  if (r.nextBoolean()) {
    int maxNumThreadStates = rarely(r) ? TestUtil.nextInt(r, 5, 20) // crazy value
        : TestUtil.nextInt(r, 1, 4); // reasonable value

    c.setMaxThreadStates(maxNumThreadStates);
  }

  c.setMergePolicy(newMergePolicy(r));

  if (rarely(r)) {
    c.setMergedSegmentWarmer(new SimpleMergedSegmentWarmer(c.getInfoStream()));
  }
  c.setUseCompoundFile(r.nextBoolean());
  c.setReaderPooling(r.nextBoolean());
  c.setReaderTermsIndexDivisor(TestUtil.nextInt(r, 1, 4));
  c.setCheckIntegrityAtMerge(r.nextBoolean());
  return c;
}
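When neither branch fires, the config keeps Lucene's default merge scheduler, a ConcurrentMergeScheduler with default limits, so randomized tests still exercise concurrent merging most of the time.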
Project: xodus    File: ExodusLuceneTestsBase.java
protected void createIndexWriterConfig() {
    indexConfig = new IndexWriterConfig(LUCENE_VERSION, analyzer);
    indexConfig.setMergeScheduler(new SerialMergeScheduler());
    indexConfig.setMaxThreadStates(1);
}
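Combined with setMaxThreadStates(1), which allows only a single internal indexing thread state, the serial merge scheduler makes this test writer effectively single-threaded and deterministic.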
Project: incubator-blur    File: FastHdfsKeyValueDirectoryTest.java
@Test
public void testMulipleCommitsAndReopens() throws IOException {
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
  conf.setMergeScheduler(new SerialMergeScheduler());
  TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
  mergePolicy.setUseCompoundFile(false);

  Set<String> fileSet = new TreeSet<String>();
  long seed = new Random().nextLong();
  System.out.println("Seed:" + seed);
  Random random = new Random(seed);
  int docCount = 0;
  int passes = 10;
  byte[] segmentsGenContents = null;
  for (int run = 0; run < passes; run++) {
    final FastHdfsKeyValueDirectory directory = new FastHdfsKeyValueDirectory(false, _timer, _configuration,
        new Path(_path, "test_multiple_commits_reopens"));
    if (segmentsGenContents != null) {
      byte[] segmentsGenContentsCurrent = readSegmentsGen(directory);
      assertTrue(Arrays.equals(segmentsGenContents, segmentsGenContentsCurrent));
    }
    assertFiles(fileSet, run, -1, directory);
    assertEquals(docCount, getDocumentCount(directory));
    IndexWriter writer = new IndexWriter(directory, conf.clone());
    int numberOfCommits = random.nextInt(100);
    for (int i = 0; i < numberOfCommits; i++) {
      assertFiles(fileSet, run, i, directory);
      addDocuments(writer, random.nextInt(100));
      // Before Commit
      writer.commit();
      // After Commit

      // Set files after commit
      {
        fileSet.clear();
        List<IndexCommit> listCommits = DirectoryReader.listCommits(directory);
        assertEquals(1, listCommits.size());
        IndexCommit indexCommit = listCommits.get(0);
        fileSet.addAll(indexCommit.getFileNames());
      }
      segmentsGenContents = readSegmentsGen(directory);
    }
    docCount = getDocumentCount(directory);
  }
}
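SerialMergeScheduler matters here because assertFiles checks the exact set of index files after every commit while the run is replayed from a printed seed; background merge threads would let the file set vary between runs. The conf.clone() call gives each IndexWriter its own copy of the configuration, since a single IndexWriterConfig instance cannot be reused across writers in this Lucene version.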