Example source code for the Java class org.apache.lucene.index.MergePolicy

Project: elasticsearch_my    File: InternalEngine.java
private IndexWriterConfig getIndexWriterConfig(boolean create) {
    final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
    iwc.setCommitOnClose(false); // we by default don't commit on close
    iwc.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
    iwc.setIndexDeletionPolicy(deletionPolicy);
    // with tests.verbose, lucene sets this up: plumb to align with filesystem stream
    boolean verbose = false;
    try {
        verbose = Boolean.parseBoolean(System.getProperty("tests.verbose"));
    } catch (Exception ignore) {
    }
    iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger));
    iwc.setMergeScheduler(mergeScheduler);
    MergePolicy mergePolicy = config().getMergePolicy();
    // Give us the opportunity to upgrade old segments while performing
    // background merges
    mergePolicy = new ElasticsearchMergePolicy(mergePolicy);
    iwc.setMergePolicy(mergePolicy);
    iwc.setSimilarity(engineConfig.getSimilarity());
    iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
    iwc.setCodec(engineConfig.getCodec());
    iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
    return iwc;
}
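
For readers who want the same pattern outside Elasticsearch, here is a minimal plain-Lucene sketch (hypothetical standalone code against the current Lucene API; the analyzer, path, and TieredMergePolicy merely stand in for engineConfig's choices):

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.FSDirectory;

public class MergePolicySetup {
    public static IndexWriter openWriter(String path) throws IOException {
        IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
        iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        MergePolicy mergePolicy = new TieredMergePolicy(); // stands in for config().getMergePolicy()
        iwc.setMergePolicy(mergePolicy);                   // ES additionally wraps this in ElasticsearchMergePolicy
        iwc.setUseCompoundFile(true);                      // same trade-off as above: fewer open file handles
        return new IndexWriter(FSDirectory.open(Paths.get(path)), iwc);
    }
}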
Project: Elasticsearch    File: DLBasedEngine.java
@Override
protected void handleMergeException(final Directory dir, final Throwable exc) {
    logger.error("failed to merge", exc);
    if (config().getMergeSchedulerConfig().isNotifyOnMergeFailure()) {
        engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
            @Override
            public void onFailure(Throwable t) {
                logger.debug("merge failure action rejected", t);
            }

            @Override
            protected void doRun() throws Exception {
                MergePolicy.MergeException e = new MergePolicy.MergeException(exc, dir);
                failEngine("merge failed", e);
            }
        });
    }
}
Project: Elasticsearch    File: InternalEngine.java
@Override
protected void handleMergeException(final Directory dir, final Throwable exc) {
    logger.error("failed to merge", exc);
    if (config().getMergeSchedulerConfig().isNotifyOnMergeFailure()) {
        engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
            @Override
            public void onFailure(Throwable t) {
                logger.debug("merge failure action rejected", t);
            }

            @Override
            protected void doRun() throws Exception {
                MergePolicy.MergeException e = new MergePolicy.MergeException(exc, dir);
                failEngine("merge failed", e);
            }
        });
    }
}
Project: linden    File: SortingMergePolicyFactory.java
@Override
public MergePolicy getInstance(Map<String, String> params) throws IOException {
  String field = params.get(SORT_FIELD);
  SortField.Type sortFieldType = SortField.Type.DOC;
  if (params.containsKey(SORT_FIELD_TYPE)) {
    sortFieldType = SortField.Type.valueOf(params.get(SORT_FIELD_TYPE).toUpperCase());
  }

  if (sortFieldType == SortField.Type.DOC) {
    throw new IOException(
        "Relying on internal lucene DocIDs is not guaranteed to work, this is only an implementation detail.");
  }

  boolean desc = true;
  if (params.containsKey(SORT_DESC)) {
    try {
      desc = Boolean.valueOf(params.get(SORT_DESC));
    } catch (Exception e) {
      desc = true;
    }
  }
  SortField sortField = new SortField(field, sortFieldType, desc);
  Sort sort = new Sort(sortField);
  return new SortingMergePolicyDecorator(new TieredMergePolicy(), sort);
}
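
A hedged usage sketch for this factory; the literal key strings below are assumptions, since linden defines the real SORT_FIELD / SORT_FIELD_TYPE / SORT_DESC constants:

Map<String, String> params = new HashMap<>();
params.put("merge.policy.sort.field", "timestamp");  // SORT_FIELD (key string assumed)
params.put("merge.policy.sort.field.type", "LONG");  // SORT_FIELD_TYPE; DOC is rejected above
params.put("merge.policy.sort.desc", "true");        // SORT_DESC
MergePolicy mp = new SortingMergePolicyFactory().getInstance(params);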
Project: search    File: SortingMergePolicy.java
@Override
public MergePolicy.DocMap getDocMap(final MergeState mergeState) {
  if (unsortedReaders == null) {
    throw new IllegalStateException();
  }
  if (docMap == null) {
    return super.getDocMap(mergeState);
  }
  assert mergeState.docMaps.length == 1; // we returned a singleton reader
  final PackedLongValues deletes = getDeletes(unsortedReaders);
  return new MergePolicy.DocMap() {
    @Override
    public int map(int old) {
      final int oldWithDeletes = old + (int) deletes.get(old);
      final int newWithDeletes = docMap.oldToNew(oldWithDeletes);
      return mergeState.docMaps[0].get(newWithDeletes);
    }
  };
}
Project: search    File: TestSortingMergePolicy.java
static MergePolicy newSortingMergePolicy(Sort sort) {
  // usually create a MP with a low merge factor so that many merges happen
  MergePolicy mp;
  int thingToDo = random().nextInt(3);
  if (thingToDo == 0) {
    TieredMergePolicy tmp = newTieredMergePolicy(random());
    final int numSegs = TestUtil.nextInt(random(), 3, 5);
    tmp.setSegmentsPerTier(numSegs);
    tmp.setMaxMergeAtOnce(TestUtil.nextInt(random(), 2, numSegs));
    mp = tmp;
  } else if (thingToDo == 1) {
    LogMergePolicy lmp = newLogMergePolicy(random());
    lmp.setMergeFactor(TestUtil.nextInt(random(), 3, 5));
    mp = lmp;
  } else {
    // just a regular random one from LTC (could be alcoholic etc)
    mp = newMergePolicy();
  }
  // wrap it with a sorting mp
  return new SortingMergePolicy(mp, sort);
}
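
A sketch of how such a helper is typically used from a LuceneTestCase subclass (the "timestamp" field is illustrative, not from the snippet):

Directory dir = newDirectory();
Sort sort = new Sort(new SortField("timestamp", SortField.Type.LONG));
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
iwc.setMergePolicy(newSortingMergePolicy(sort));
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);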
Project: search    File: TestUtil.java
/** just tries to configure things to keep the open file
 * count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setNoCFSRatio(1.0);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setNoCFSRatio(1.0);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    // wtf... shouldnt it be even lower since its 1 by default?!?!
    ((ConcurrentMergeScheduler) ms).setMaxMergesAndThreads(3, 2);
  }
}
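
Typical call site, assuming a LuceneTestCase-style test (sketch):

Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
TestUtil.reduceOpenFiles(w); // clamp merge width and force compound files before heavy indexing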
Project: search    File: TestRandomMergePolicy.java
/**
 * Ensure any "getter" methods return the same value as
 * the wrapped MP
 * (future proof ourselves against new final getter/setter pairs being 
 * added to MP w/o dealing with them in the RMP Constructor)
 */
public void testGetters() throws IllegalAccessException, InvocationTargetException {
  final int iters = atLeast(20);
  for (int i = 0; i < iters; i++) {
    RandomMergePolicy rmp = new RandomMergePolicy();
    Class mp = MergePolicy.class;
    for (Method meth : mp.getDeclaredMethods()) {
      if (meth.getName().startsWith("get") &&
          (0 == meth.getParameterTypes().length)) {

        assertEquals("MergePolicy getter gave diff results for RandomMergePolicy and the policy it wrapped: " + meth.toGenericString(),
                     meth.invoke(rmp), meth.invoke(rmp.inner));
      }
    }
  }
}
Project: NYBC    File: _TestUtil.java
/** just tries to configure things to keep the open file
 * count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setUseCompoundFile(true);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setUseCompoundFile(true);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    ((ConcurrentMergeScheduler) ms).setMaxThreadCount(2);
    ((ConcurrentMergeScheduler) ms).setMaxMergeCount(3);
  }
}
Project: NYBC    File: TestMergePolicyConfig.java
public void testTieredMergePolicyConfig() throws Exception {
  IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getSchema());
  MergePolicy mp = iwc.getMergePolicy();
  assertTrue(mp instanceof TieredMergePolicy);
  TieredMergePolicy tieredMP = (TieredMergePolicy) mp;

  // mp-specific setter
  assertEquals(19, tieredMP.getMaxMergeAtOnceExplicit());

  // make sure we apply compoundFile and mergeFactor
  assertEquals(false, tieredMP.getUseCompoundFile());
  assertEquals(7, tieredMP.getMaxMergeAtOnce());

  // make sure we overrode segmentsPerTier (split from maxMergeAtOnce out of mergeFactor)
  assertEquals(9D, tieredMP.getSegmentsPerTier(), 0.001);

  // make sure we overrode noCFSRatio (useless because we disabled useCompoundFile,
  // but just to make sure it works)
  assertEquals(1.0D, tieredMP.getNoCFSRatio(), 0.001);
}
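
The asserted values correspond roughly to the following direct configuration (a sketch only; the real test reaches these setters through solrconfig.xml, and setUseCompoundFile is the Lucene 4.x-era API):

TieredMergePolicy tmp = new TieredMergePolicy();
tmp.setMaxMergeAtOnceExplicit(19); // mp-specific setter under test
tmp.setMaxMergeAtOnce(7);          // derived from mergeFactor
tmp.setSegmentsPerTier(9);         // overridden separately from maxMergeAtOnce
tmp.setUseCompoundFile(false);     // Lucene 4.x-era setter, as asserted above
tmp.setNoCFSRatio(1.0);            // moot while compound files are off, but verified anyway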
Project: search-core    File: TestMergePolicyConfig.java
public void testTieredMergePolicyConfig() throws Exception {
  IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getSchema());
  MergePolicy mp = iwc.getMergePolicy();
  assertTrue(mp instanceof TieredMergePolicy);
  TieredMergePolicy tieredMP = (TieredMergePolicy) mp;

  // mp-specific setter
  assertEquals(19, tieredMP.getMaxMergeAtOnceExplicit());

  // make sure we apply compoundFile and mergeFactor
  assertEquals(false, tieredMP.getUseCompoundFile());
  assertEquals(7, tieredMP.getMaxMergeAtOnce());

  // make sure we overrode segmentsPerTier (split from maxMergeAtOnce out of mergeFactor)
  assertEquals(9D, tieredMP.getSegmentsPerTier(), 0.001);

  // make sure we overrode noCFSRatio (useless because we disabled useCompoundFile,
  // but just to make sure it works)
  assertEquals(1.0D, tieredMP.getNoCFSRatio(), 0.001);
}
Project: read-open-source-code    File: SortingMergePolicy.java
@Override
public MergePolicy.DocMap getDocMap(final MergeState mergeState) {
  if (unsortedReaders == null) {
    throw new IllegalStateException();
  }
  if (docMap == null) {
    return super.getDocMap(mergeState);
  }
  assert mergeState.docMaps.length == 1; // we returned a singleton reader
  final MonotonicAppendingLongBuffer deletes = getDeletes(unsortedReaders);
  return new MergePolicy.DocMap() {
    @Override
    public int map(int old) {
      final int oldWithDeletes = old + (int) deletes.get(old);
      final int newWithDeletes = docMap.oldToNew(oldWithDeletes);
      return mergeState.docMaps[0].get(newWithDeletes);
    }
  };
}
Project: read-open-source-code    File: SortingMergePolicy.java
@Override
public MergePolicy.DocMap getDocMap(final MergeState mergeState) {
  if (unsortedReaders == null) {
    throw new IllegalStateException();
  }
  if (docMap == null) {
    return super.getDocMap(mergeState);
  }
  assert mergeState.docMaps.length == 1; // we returned a singleton reader
  final PackedLongValues deletes = getDeletes(unsortedReaders);
  return new MergePolicy.DocMap() {
    @Override
    public int map(int old) {
      final int oldWithDeletes = old + (int) deletes.get(old);
      final int newWithDeletes = docMap.oldToNew(oldWithDeletes);
      return mergeState.docMaps[0].get(newWithDeletes);
    }
  };
}
Project: Maskana-Gestor-de-Conocimiento    File: SortingMergePolicy.java
@Override
public MergePolicy.DocMap getDocMap(final MergeState mergeState) {
  if (unsortedReaders == null) {
    throw new IllegalStateException();
  }
  if (docMap == null) {
    return super.getDocMap(mergeState);
  }
  assert mergeState.docMaps.length == 1; // we returned a singleton reader
  final MonotonicAppendingLongBuffer deletes = getDeletes(unsortedReaders);
  return new MergePolicy.DocMap() {
    @Override
    public int map(int old) {
      final int oldWithDeletes = old + (int) deletes.get(old);
      final int newWithDeletes = docMap.oldToNew(oldWithDeletes);
      return mergeState.docMaps[0].get(newWithDeletes);
    }
  };
}
Project: Maskana-Gestor-de-Conocimiento    File: TestSortingMergePolicy.java
static MergePolicy newSortingMergePolicy(Sorter sorter) {
  // create a MP with a low merge factor so that many merges happen
  MergePolicy mp;
  if (random().nextBoolean()) {
    TieredMergePolicy tmp = newTieredMergePolicy(random());
    final int numSegs = _TestUtil.nextInt(random(), 3, 5);
    tmp.setSegmentsPerTier(numSegs);
    tmp.setMaxMergeAtOnce(_TestUtil.nextInt(random(), 2, numSegs));
    mp = tmp;
  } else {
    LogMergePolicy lmp = newLogMergePolicy(random());
    lmp.setMergeFactor(_TestUtil.nextInt(random(), 3, 5));
    mp = lmp;
  }
  // wrap it with a sorting mp
  return new SortingMergePolicy(mp, sorter);
}
Project: Maskana-Gestor-de-Conocimiento    File: _TestUtil.java
/** just tries to configure things to keep the open file
 * count lowish */
public static void reduceOpenFiles(IndexWriter w) {
  // keep number of open files lowish
  MergePolicy mp = w.getConfig().getMergePolicy();
  if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
    lmp.setNoCFSRatio(1.0);
  } else if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
    tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
    tmp.setNoCFSRatio(1.0);
  }
  MergeScheduler ms = w.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    // wtf... shouldnt it be even lower since its 1 by default?!?!
    ((ConcurrentMergeScheduler) ms).setMaxMergesAndThreads(3, 2);
  }
}
Project: elasticsearch_my    File: EngineConfig.java
/**
 * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
 */
public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool,
                    IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
                    MergePolicy mergePolicy, Analyzer analyzer,
                    Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
                    TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy,
                    TranslogConfig translogConfig, TimeValue flushMergesAfter, ReferenceManager.RefreshListener refreshListeners,
                    long maxUnsafeAutoIdTimestamp) {
    if (openMode == null) {
        throw new IllegalArgumentException("openMode must not be null");
    }
    this.shardId = shardId;
    this.indexSettings = indexSettings;
    this.threadPool = threadPool;
    this.warmer = warmer == null ? (a) -> {} : warmer;
    this.store = store;
    this.deletionPolicy = deletionPolicy;
    this.mergePolicy = mergePolicy;
    this.analyzer = analyzer;
    this.similarity = similarity;
    this.codecService = codecService;
    this.eventListener = eventListener;
    codecName = indexSettings.getValue(INDEX_CODEC_SETTING);
    // We give IndexWriter a "huge" (256 MB) buffer, so it won't flush on its own unless the ES indexing buffer is also huge and/or
    // there are not too many shards allocated to this node.  Instead, IndexingMemoryController periodically checks
    // and refreshes the most heap-consuming shards when total indexing heap usage across all shards is too high:
    indexingBufferSize = new ByteSizeValue(256, ByteSizeUnit.MB);
    this.translogRecoveryPerformer = translogRecoveryPerformer;
    this.queryCache = queryCache;
    this.queryCachingPolicy = queryCachingPolicy;
    this.translogConfig = translogConfig;
    this.flushMergesAfter = flushMergesAfter;
    this.openMode = openMode;
    this.refreshListeners = refreshListeners;
    assert maxUnsafeAutoIdTimestamp >= IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP :
        "maxUnsafeAutoIdTimestamp must be >= -1 but was " + maxUnsafeAutoIdTimestamp;
    this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp;
}
Project: elasticsearch_my    File: InternalEngineTests.java
protected InternalEngine createEngine(
    IndexSettings indexSettings,
    Store store,
    Path translogPath,
    MergePolicy mergePolicy,
    @Nullable IndexWriterFactory indexWriterFactory,
    @Nullable Supplier<SequenceNumbersService> sequenceNumbersServiceSupplier) throws IOException {
    EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null);
    InternalEngine internalEngine = createInternalEngine(indexWriterFactory, sequenceNumbersServiceSupplier, config);
    if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) {
        internalEngine.recoverFromTranslog();
    }
    return internalEngine;
}
Project: elasticsearch_my    File: ElasticsearchUncaughtExceptionHandlerTests.java
public void testIsFatalCause() {
    assertFatal(new MergePolicy.MergeException(new OutOfMemoryError(), null));
    assertFatal(new OutOfMemoryError());
    assertFatal(new StackOverflowError());
    assertFatal(new InternalError());
    assertFatal(new UnknownError());
    assertFatal(new IOError(new IOException()));
    assertNonFatal(new RuntimeException());
    assertNonFatal(new UncheckedIOException(new IOException()));
}
Project: Elasticsearch    File: EngineConfig.java
/**
 * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
 */
public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService,
                    Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
                    MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
                    Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
                    TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, IndexSearcherWrappingService wrappingService, TranslogConfig translogConfig) {
    this.shardId = shardId;
    this.indexSettings = indexSettings;
    this.threadPool = threadPool;
    this.indexingService = indexingService;
    this.warmer = warmer;
    this.store = store;
    this.deletionPolicy = deletionPolicy;
    this.mergePolicy = mergePolicy;
    this.mergeSchedulerConfig = mergeSchedulerConfig;
    this.analyzer = analyzer;
    this.similarity = similarity;
    this.codecService = codecService;
    this.failedEngineListener = failedEngineListener;
    this.wrappingService = wrappingService;
    this.optimizeAutoGenerateId = indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, false);
    this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
    codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
    // We start up inactive and rely on IndexingMemoryController to give us our fair share once we start indexing:
    indexingBufferSize = IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER;
    gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
    versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE);
    updateVersionMapSize();
    this.translogRecoveryPerformer = translogRecoveryPerformer;
    this.forceNewTranslog = indexSettings.getAsBoolean(INDEX_FORCE_NEW_TRANSLOG, false);
    this.queryCache = queryCache;
    this.queryCachingPolicy = queryCachingPolicy;
    this.translogConfig = translogConfig;
}
Project: linden    File: TieredMergePolicyFactory.java
@Override
public MergePolicy getInstance(Map<String, String> config) throws IOException {
  TieredMergePolicy mergePolicy = new TieredMergePolicy();

  if (config.containsKey(SEGMENTS_PER_TIER)) {
    mergePolicy.setSegmentsPerTier(Double.valueOf(config.get(SEGMENTS_PER_TIER)));
  }
  if (config.containsKey(MAX_MERGE_AT_ONCE)) {
    mergePolicy.setMaxMergeAtOnce(Integer.valueOf(config.get(MAX_MERGE_AT_ONCE)));
  }
  return mergePolicy;
}
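
Usage mirrors the sorting factory above; the key strings are again assumptions, and malformed numbers propagate as unchecked NumberFormatException:

Map<String, String> config = new HashMap<>();
config.put("merge.policy.segments.per.tier", "10"); // SEGMENTS_PER_TIER (key string assumed)
config.put("merge.policy.max.merge.at.once", "5");  // MAX_MERGE_AT_ONCE (key string assumed)
MergePolicy mp = new TieredMergePolicyFactory().getInstance(config);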
Project: linden    File: LindenConfig.java
public IndexWriterConfig createIndexWriterConfig() throws IOException {
  IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LATEST, getIndexAnalyzerInstance());
  indexWriterConfig.setRAMBufferSizeMB(48);

  MergePolicy mergePolicy = getPluginManager().getInstance(LindenConfigBuilder.MERGE_POLICY, MergePolicy.class);
  if (mergePolicy != null) {
    indexWriterConfig.setMergePolicy(mergePolicy);
  }
  LOGGER.info("Merge policy : {}", mergePolicy == null ? "Default" : mergePolicy);

  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
  cms.setMaxMergesAndThreads(8, 1);
  indexWriterConfig.setMergeScheduler(cms);
  return indexWriterConfig;
}
Project: search    File: LuceneTestCase.java
public static MergePolicy newMergePolicy(Random r) {
  if (rarely(r)) {
    return new MockRandomMergePolicy(r);
  } else if (r.nextBoolean()) {
    return newTieredMergePolicy(r);
  } else if (r.nextInt(5) == 0) { 
    return newAlcoholicMergePolicy(r, classEnvRule.timeZone);
  }
  return newLogMergePolicy(r);
}
Project: search    File: LuceneTestCase.java
private static void configureRandom(Random r, MergePolicy mergePolicy) {
  if (r.nextBoolean()) {
    mergePolicy.setNoCFSRatio(0.1 + r.nextDouble()*0.8);
  } else {
    mergePolicy.setNoCFSRatio(r.nextBoolean() ? 1.0 : 0.0);
  }

  if (rarely(r)) {
    mergePolicy.setMaxCFSSegmentSizeMB(0.2 + r.nextDouble() * 2.0);
  } else {
    mergePolicy.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
  }
}
Project: search    File: TestMergeSchedulerExternal.java
@Override
protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
  MergeThread thread = new MyMergeThread(writer, merge);
  thread.setThreadPriority(getMergeThreadPriority());
  thread.setDaemon(true);
  thread.setName("MyMergeThread");
  return thread;
}
Project: search    File: RandomMergePolicy.java
private RandomMergePolicy(MergePolicy inner) {
  super(inner.getNoCFSRatio(), 
        (long) (inner.getMaxCFSSegmentSizeMB() * 1024 * 1024));
  this.inner = inner;
  log.info("RandomMergePolicy wrapping {}: {}",
           inner.getClass(), inner);
}
Project: OTF-Query-Services    File: LuceneIndexer.java
public LuceneIndexer(String indexName) throws IOException {
    this.indexName = indexName;
    luceneWriterService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(),
            new NamedThreadFactory(threadGroup, indexName + " Lucene writer"));
    luceneWriterFutureCheckerService = Executors.newFixedThreadPool(1,
            new NamedThreadFactory(threadGroup, indexName + " Lucene future checker"));
    setupRoot();

    File indexDirectoryFile = new File(root.getPath() + "/" + indexName);
    System.out.println("Index: " + indexDirectoryFile);
    Directory indexDirectory = initDirectory(indexDirectoryFile);

    indexDirectory.clearLock("write.lock");

    IndexWriterConfig config = new IndexWriterConfig(luceneVersion, new StandardAnalyzer(luceneVersion));
    MergePolicy mergePolicy = new LogByteSizeMergePolicy();

    config.setMergePolicy(mergePolicy);
    config.setSimilarity(new ShortTextSimilarity());

    IndexWriter indexWriter = new IndexWriter(indexDirectory, config);

    trackingIndexWriter = new NRTManager.TrackingIndexWriter(indexWriter);

    boolean applyAllDeletes = false;

    searcherManager = new NRTManager(trackingIndexWriter, null, applyAllDeletes);

    // Refreshes searcher every 5 seconds when nobody is waiting, and up to 100 msec delay
    // when somebody is waiting:
    reopenThread = new NRTManagerReopenThread(searcherManager, 5.0, 0.1);
    this.startThread();

}
Project: NYBC    File: _TestUtil.java
public static void setUseCompoundFile(MergePolicy mp, boolean v) {
  if (mp instanceof TieredMergePolicy) {
    ((TieredMergePolicy) mp).setUseCompoundFile(v);
  } else if (mp instanceof LogMergePolicy) {
    ((LogMergePolicy) mp).setUseCompoundFile(v);
  } else {
    throw new IllegalArgumentException("cannot set compound file for MergePolicy " + mp);
  }
}
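
A quick sketch of the two supported branches; any other MergePolicy implementation hits the IllegalArgumentException:

_TestUtil.setUseCompoundFile(new TieredMergePolicy(), true);      // TieredMergePolicy branch
_TestUtil.setUseCompoundFile(new LogByteSizeMergePolicy(), true); // LogMergePolicy branch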
Project: NYBC    File: TestMergeSchedulerExternal.java
@Override
protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
  MergeThread thread = new MyMergeThread(writer, merge);
  thread.setThreadPriority(getMergeThreadPriority());
  thread.setDaemon(true);
  thread.setName("MyMergeThread");
  return thread;
}
Project: Maskana-Gestor-de-Conocimiento    File: TestMergeSchedulerExternal.java
@Override
protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
  MergeThread thread = new MyMergeThread(writer, merge);
  thread.setThreadPriority(getMergeThreadPriority());
  thread.setDaemon(true);
  thread.setName("MyMergeThread");
  return thread;
}
Project: elasticsearch_my    File: IndexSettings.java
/**
 * Returns the merge policy that should be used for this index.
 */
public MergePolicy getMergePolicy() {
    return mergePolicyConfig.getMergePolicy();
}
Project: elasticsearch_my    File: EngineConfig.java
/**
 * Returns the {@link org.apache.lucene.index.MergePolicy} for the engines {@link org.apache.lucene.index.IndexWriter}
 */
public MergePolicy getMergePolicy() {
    return mergePolicy;
}
Project: elasticsearch_my    File: ElasticsearchConcurrentMergeScheduler.java
@Override
protected void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
    int totalNumDocs = merge.totalNumDocs();
    long totalSizeInBytes = merge.totalBytesSize();
    long timeNS = System.nanoTime();
    currentMerges.inc();
    currentMergesNumDocs.inc(totalNumDocs);
    currentMergesSizeInBytes.inc(totalSizeInBytes);

    OnGoingMerge onGoingMerge = new OnGoingMerge(merge);
    onGoingMerges.add(onGoingMerge);

    if (logger.isTraceEnabled()) {
        logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size", OneMergeHelper.getSegmentName(merge), merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes), new ByteSizeValue(merge.estimatedMergeBytes));
    }
    try {
        beforeMerge(onGoingMerge);
        super.doMerge(writer, merge);
    } finally {
        long tookMS = TimeValue.nsecToMSec(System.nanoTime() - timeNS);

        onGoingMerges.remove(onGoingMerge);
        afterMerge(onGoingMerge);

        currentMerges.dec();
        currentMergesNumDocs.dec(totalNumDocs);
        currentMergesSizeInBytes.dec(totalSizeInBytes);

        totalMergesNumDocs.inc(totalNumDocs);
        totalMergesSizeInBytes.inc(totalSizeInBytes);
        totalMerges.inc(tookMS);

        long stoppedMS = TimeValue.nsecToMSec(merge.rateLimiter.getTotalStoppedNS());
        long throttledMS = TimeValue.nsecToMSec(merge.rateLimiter.getTotalPausedNS());

        totalMergeStoppedTime.inc(stoppedMS);
        totalMergeThrottledTime.inc(throttledMS);

        String message = String.format(Locale.ROOT,
                                       "merge segment [%s] done: took [%s], [%,.1f MB], [%,d docs], [%s stopped], [%s throttled], [%,.1f MB written], [%,.1f MB/sec throttle]",
                                       OneMergeHelper.getSegmentName(merge),
                                       TimeValue.timeValueMillis(tookMS),
                                       totalSizeInBytes/1024f/1024f,
                                       totalNumDocs,
                                       TimeValue.timeValueMillis(stoppedMS),
                                       TimeValue.timeValueMillis(throttledMS),
                                       merge.rateLimiter.getTotalBytesWritten()/1024f/1024f,
                                       merge.rateLimiter.getMBPerSec());

        if (tookMS > 20000) { // if more than 20 seconds, DEBUG log it
            logger.debug("{}", message);
        } else if (logger.isTraceEnabled()) {
            logger.trace("{}", message);
        }
    }
}
Project: elasticsearch_my    File: ElasticsearchConcurrentMergeScheduler.java
@Override
protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
    MergeThread thread = super.getMergeThread(writer, merge);
    thread.setName(EsExecutors.threadName(indexSettings, "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " + thread.getName()));
    return thread;
}
Project: elasticsearch_my    File: MergePolicyConfig.java
MergePolicy getMergePolicy() {
    return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE;
}
Project: elasticsearch_my    File: OnGoingMerge.java
public OnGoingMerge(MergePolicy.OneMerge merge) {
    this.id = Integer.toString(System.identityHashCode(merge));
    this.mergedSegments = merge.segments;
}
Project: elasticsearch_my    File: ElasticsearchMergePolicy.java
/** @param delegate the merge policy to wrap */
public ElasticsearchMergePolicy(MergePolicy delegate) {
    this.delegate = delegate;
}
Project: elasticsearch_my    File: ElasticsearchUncaughtExceptionHandler.java
static boolean isFatalUncaught(Throwable e) {
    return isFatalCause(e) || (e instanceof MergePolicy.MergeException && isFatalCause(e.getCause()));
}
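
isFatalCause itself is not part of this listing; judging only by ElasticsearchUncaughtExceptionHandlerTests above (fatal: OutOfMemoryError, StackOverflowError, InternalError, UnknownError, IOError; non-fatal: RuntimeException, UncheckedIOException), a plausible sketch is:

// Inferred from the test expectations, not copied from the ES source:
static boolean isFatalCause(Throwable cause) {
    return cause instanceof Error; // all the asserted-fatal throwables extend Error
}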
Project: elasticsearch_my    File: InternalEngineTests.java
protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
                                      @Nullable IndexWriterFactory indexWriterFactory) throws IOException {
    return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterFactory, null);
}