Example source code for the Java class gnu.trove.map.hash.TObjectDoubleHashMap

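TObjectDoubleHashMap is GNU Trove's open-addressed hash map from object keys to primitive double values: unlike HashMap<K, Double> it stores the doubles unboxed and returns a configurable "no-entry" value from get() for missing keys. The project snippets below show it in real use. For orientation, here is a minimal self-contained sketch of the core calls they rely on (the word-score data is invented for illustration):

import gnu.trove.map.hash.TObjectDoubleHashMap;

public class TObjectDoubleHashMapDemo {
    public static void main(String[] args) {
        // initial capacity 16, load factor 0.5, no-entry value -1.0
        TObjectDoubleHashMap<String> scores = new TObjectDoubleHashMap<>(16, 0.5f, -1.0);

        scores.put("alpha", 0.75);                    // insert or overwrite
        scores.adjustOrPutValue("alpha", 0.25, 0.25); // add 0.25 if present, else insert 0.25
        scores.adjustOrPutValue("beta", 0.25, 0.25);

        System.out.println(scores.get("alpha"));      // 1.0
        System.out.println(scores.get("missing"));    // -1.0: the no-entry value, no null, no boxing

        // iterate without allocating Map.Entry objects
        scores.forEachEntry((key, value) -> {
            System.out.println(key + " -> " + value);
            return true; // keep iterating
        });
    }
}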
Project: xcc    File: RegAllocPBQP.java
private PBQPVector constructCostVector(int vreg,
        TIntArrayList allowedReg,
        TObjectDoubleHashMap<Pair<Integer, Integer>> coalesceMap,
        double spillCost)
{
    PBQPVector cost = new PBQPVector(allowedReg.size()+1);
    cost.set(0, spillCost);

    for (int i = 0, e = allowedReg.size(); i < e; i++)
    {
        int preg = allowedReg.get(i);
        Pair<Integer, Integer> regPair = Pair.get(preg, vreg);
        if (coalesceMap.containsKey(regPair))
        {
            cost.set(i+1, -coalesceMap.get(regPair));
        }
    }
    return cost;
}
Project: spf    File: LexemeCooccurrenceScorer.java
protected static TObjectDoubleHashMap<String> readStats(
        BufferedReader reader) throws IOException {
    final TObjectDoubleHashMap<String> pmis = new TObjectDoubleHashMap<String>();

    String line = reader.readLine();
    while (line != null) { // for each line in the file
        line = line.trim();
        if (!line.equals("")) {
            final String[] tokens = line.split("..\\:\\:..");
            final String id = tokens[0] + "  ::  " + tokens[1];
            final double score = Double.parseDouble(tokens[2]);
            pmis.put(id, score); // the map stores primitive doubles; no boxing needed
        }
        line = reader.readLine();
    }

    return pmis;
}
Project: cineast    File: JSONUtils.java
public static Pair<ImageQueryContainer, TObjectDoubleHashMap<String>> readQueryFromJSON(Reader reader){
    LOGGER.traceEntry();
    try {
        JsonObject jobj_in = JsonObject.readFrom(reader);
        ImageQueryContainer qc = queryContainerFromJSON(jobj_in);

        String weights = jobj_in.get("weights").toString();

        TObjectDoubleHashMap<String> weightMap = getWeightsFromJsonString(weights);

        return LOGGER.traceExit(new Pair<ImageQueryContainer, TObjectDoubleHashMap<String>>(qc, weightMap));
    } catch (IOException e) {
        LOGGER.error(LogHelper.getStackTrace(e));
        return null;
    }
}
Project: cineast    File: RetrieverConfig.java
public TObjectDoubleHashMap<Retriever> getRetrieversByCategory(String category){
    List<DoublePair<Class<? extends Retriever>>> list = this.retrieverCategories.get(category);
    if(list == null){
        return new TObjectDoubleHashMap<>(1);
    }

    TObjectDoubleHashMap<Retriever> _return = new TObjectDoubleHashMap<>(list.size());
    for(DoublePair<Class<? extends Retriever>> pair : list){
        Retriever rev = ReflectionHelper.instanciate(pair.key);
        if(rev != null){
            _return.put(rev, pair.value);
        }
    }

    return _return;
}
Project: demidovii / galago-git    File: FieldRelevanceModelTraversal.java
private Node useRelevantDocuments() throws Exception {
  List<String> docnames = p.getList("documents", String.class);
  List<Document> docs = getDocuments(docnames);
  FieldLanguageModel flm = new FieldLanguageModel();
  for (Document d : docs) {
    if (d.terms != null && d.terms.size() > 0) {
      flm.addDocument(d);
    }
  }

  Node termNodes = new Node("combine", new NodeParameters(), new ArrayList<Node>(), 0);
  termNodes.getNodeParameters().set("norm", false);

  // Now get the sub-model for each term
  TObjectDoubleHashMap<String> weights = new TObjectDoubleHashMap<String>();
  for (String term : queryTerms) {
    weights.clear();
    for (String field : fields) {
      weights.put(field, flm.getFieldProbGivenTerm(field, term)); // keyed by field: createTermFieldNodes reads weights.get(field)
    }
    termNodes.addChild(createTermFieldNodes(term, weights));
  }
  return termNodes;
}
Project: demidovii / galago-git    File: NodeParameters.java
@Override
public NodeParameters clone() {
  NodeParameters duplicate = new NodeParameters();
  if (keyMapping != null) {
    duplicate.keyMapping = new HashMap<>(this.keyMapping);
  }
  if (boolMap != null) {
    duplicate.boolMap = new TObjectByteHashMap<>(boolMap);
  }
  if (longMap != null) {
    duplicate.longMap = new TObjectLongHashMap<>(longMap);
  }
  if (doubleMap != null) {
    duplicate.doubleMap = new TObjectDoubleHashMap<>(doubleMap);
  }
  if (stringMap != null) {
    duplicate.stringMap = new HashMap<>(this.stringMap);
  }
  return duplicate;
}
Project: smt-ibm-model    File: IBMModel3.java
private void initFertility() {
    n = new TObjectDoubleHashMap<FertWord>();
    fertWordHashCode = new int[maxLe + 1][foDict.size()];

    double value = 1 / (double) (maxLe + 1);

    for (SentencePair p : sentPairs) {
        int le = p.getE().length();
        for (int fert = 0; fert <= le; fert++) {
            for (int i = 0; i <= p.getF().length(); i++) {
                int f = p.getF().get(i);
                int hashCode = Utils.generateTwoIntegersHashCode(fert, f);
                fertWordHashCode[fert][f] = hashCode;
                FertWord fw = getFertWord(fert, f);
                n.put(fw, value);
            }
        }
    }
}
Project: smt-ibm-model    File: IBMModel1Extended.java
@Override
protected void initTransProbs() {
    super.initTransProbs();

    // add probability from labeled data
    Set<WordPair> keySet = t.keySet();
    TObjectDoubleHashMap<WordPair> labeledDataProb = computeTFromLabeledData();

    for (WordPair ef : keySet) {
        if (labeledDataProb.containsKey(ef)) {
            t.put(ef, t.get(ef) + labeledDataProb.get(ef));
        }
    }

    // normalization
    nomalizeT();
}
Project: wikit    File: HierarchicalLDA.java
/**
 * Propagate a topic weight to a node and all its children.
 * weight is assumed to be a log.
 */
public void propagateTopicWeight(TObjectDoubleHashMap<NCRPNode> nodeWeights,
                                 NCRPNode node, double weight) {
    if (!nodeWeights.containsKey(node)) {
        // Calculating the NCRP prior proceeds from the root down
        // (i.e. following child links), but adding the word-topic
        // weights comes from the bottom up, following parent links
        // and then child links. It's possible that the leaf node may
        // have been removed just prior to this round, so the current
        // node may not have an NCRP weight. If so, it's not going to
        // be sampled anyway, so ditch it.
        return;
    }

    for (NCRPNode child : node.children) {
        propagateTopicWeight(nodeWeights, child, weight);
    }

    nodeWeights.adjustValue(node, weight);
}
Project: wikit    File: CollectionUtils.java
/** Returns the key in map that has the greatest score */
public static Object argmax (TObjectDoubleHashMap map)
{
  // A local class! Yes, Virginia, this is legal Java.
  class Accumulator implements TObjectDoubleProcedure {
    double bestVal = Double.NEGATIVE_INFINITY;
    Object bestObj = null;
    public boolean execute (Object a, double b)
    {
      if (b > bestVal) {
        bestVal = b;
        bestObj = a;
      }
      return true;
    }
  }

  Accumulator procedure = new Accumulator ();
  map.forEachEntry (procedure);
  return procedure.bestObj;
}
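A brief usage sketch for the argmax above (the vote counts are invented): it returns null for an empty map and keeps the first maximum encountered in iteration order.

TObjectDoubleHashMap<String> votes = new TObjectDoubleHashMap<>();
votes.put("red", 2.0);
votes.put("blue", 5.0);
Object winner = CollectionUtils.argmax(votes); // "blue"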
Project: nina    File: HierachicalDocTopicModel.java
/**
 * Recursively calculates the probability of selecting a parent based on the
 * random-walk probability of reaching the target w.r.t. the gamma restart
 * probability. The probability is 0 if there is no edge parentCandidate ->
 * target in the original directedFeatureGraph.
 *
 * @param parentProbabilities
 *            Stores the probability of each node being the target's parent
 * @param parentCandidate
 *            The current parent node to have its probability calculated
 * @param target
 *            The node for which paths are being sampled
 * @param parentProbability
 *            Calculated RWR probability for parentCandidate to be the
 *            target's parent
 */
private void calculateRWR(
        TObjectDoubleHashMap<RWRNode> parentProbabilities,
        RWRNode parentCandidate, final RWRNode target,
        double parentProbability) {
    for (RWRNode child : parentCandidate.children) {
        if (child.ins != target.ins) {
            double w = parentProbability
                    + Math.log((1 - gamma)
                            / (double) parentCandidate.children.size());
            calculateRWR(parentProbabilities, child, target, w);
        }
    }

    // Probability is 0 if there is no edge between parentCandidate ->
    // target in the original directedFeatureGraph. In fact, it's not even
    // stored in the result map.
    if (graph.containsEdge(parentCandidate.ins, target.ins)) {
        parentProbabilities.put(parentCandidate,
                parentProbability + Math.log(gamma));
    }
}
Project: java-probabilistic-earley-parser    File: ForwardScores.java
ForwardScores(final Grammar grammar) {
    this.semiring = grammar.semiring;
//    this.atoms = grammar.atoms;
    final double zero = semiring.zero();
//    zeroA = atoms.getAtom(zero);
    this.forwardScores = new TObjectDoubleHashMap<>(500, 0.5F, zero);
}
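The third constructor argument is the point of this snippet: unseen keys come back from get() as the semiring's zero rather than Trove's default of 0.0, so "absent" and "probability zero" coincide and no containsKey() check is needed. A minimal sketch of the same idea for a log semiring, where zero is negative infinity (the names below are illustrative, not the parser's API):

// get() on an unseen key returns the no-entry value, here log(0) = -inf
TObjectDoubleHashMap<String> logScores =
        new TObjectDoubleHashMap<>(500, 0.5f, Double.NEGATIVE_INFINITY);
logScores.put("seen-state", Math.log(0.5));
double p = logScores.get("unseen-state"); // Double.NEGATIVE_INFINITY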
Project: spf    File: LexemeCooccurrenceScorer.java
protected static TObjectDoubleHashMap<String> readStatsFile(File f)
        throws IOException {
    try (final BufferedReader reader = new BufferedReader(
            new FileReader(f))) {
        return readStats(reader);
    }
}
Project: spf    File: TroveHashVector.java
TroveHashVector(IHashVectorImmutable other) {
    if (other instanceof TroveHashVector) {
        this.values = new TObjectDoubleHashMap<KeyArgs>(
                ((TroveHashVector) other).values);
    } else {
        this.values = new TObjectDoubleHashMap<KeyArgs>(10, 0.5f,
                ZERO_VALUE);
        for (final Pair<KeyArgs, Double> o : other) {
            values.put(o.first(), o.second());
        }
    }
}
Project: cineast    File: JSONUtils.java
public static TObjectDoubleHashMap<String> getWeightsFromJsonString(String s){
    TObjectDoubleHashMap<String> weightMap = new TObjectDoubleHashMap<String>();
    JsonObject weights = JsonObject.readFrom(s);
    weightMap.put("global", weights.get("global").asDouble());
    weightMap.put("local", weights.get("local").asDouble());
    weightMap.put("edge", weights.get("edge").asDouble());
    weightMap.put("text", weights.get("text").asDouble());
    weightMap.put("motion", weights.get("motion").asDouble());
    weightMap.put("motionbackground", weights.get("motionbackground").asDouble());
    weightMap.put("complex", weights.get("complex").asDouble());
    return weightMap;
}
Project: cineast    File: ScoreElement.java
/**
 * Merges the entries of a collection of ScoreElements into the provided TObjectDoubleHashMap,
 * using each ScoreElement's ID as key and its score as value. Every score is multiplied by the
 * given weight before being merged; if an entry for the ID already exists, the weighted score
 * is added to it. Scores that are NaN or infinite are skipped.
 *
 * @param collection The collection of ScoreElements to merge.
 * @param map The score-map to merge the elements into.
 * @param weight The weight applied to each score.
 * @return The provided map, after merging.
 */
static <T extends ScoreElement> TObjectDoubleHashMap<String> mergeWithScoreMap(Collection<T> collection, TObjectDoubleHashMap<String> map, double weight) {
    for (T element : collection) {
        double score = element.getScore();
        if (Double.isInfinite(score) || Double.isNaN(score)) {
            continue;
        }
        double weightedScore = score * weight;
        map.adjustOrPutValue(element.getId(), weightedScore, weightedScore);
    }
    return map;
}
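The adjustOrPutValue call carries the merge semantics described above: for an ID already in the map it adds the weighted score to the stored value, otherwise it inserts the weighted score. A standalone sketch of the same late-fusion pattern (segment IDs, scores, and weights invented for illustration):

TObjectDoubleHashMap<String> fused = new TObjectDoubleHashMap<>();
// results from a first feature category, weight 0.7
fused.adjustOrPutValue("segment-1", 0.7 * 0.9, 0.7 * 0.9);
// results from a second category, weight 0.3; the same segment accumulates
fused.adjustOrPutValue("segment-1", 0.3 * 0.4, 0.3 * 0.4);
System.out.println(fused.get("segment-1")); // 0.63 + 0.12 = 0.75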
Project: cineast    File: ContinuousQueryDispatcher.java
public static List<SegmentScoreElement> retrieve(QueryContainer query,
    TObjectDoubleHashMap<Retriever> retrievers,
    RetrieverInitializer initializer,
    ReadableQueryConfig config) {
  return new ContinuousQueryDispatcher(r -> new RetrievalTask(r, query, config), retrievers,
      initializer).doRetrieve();
}
Project: cineast    File: ContinuousQueryDispatcher.java
public static List<SegmentScoreElement> retrieve(String segmentId,
    TObjectDoubleHashMap<Retriever> retrievers,
    RetrieverInitializer initializer,
    ReadableQueryConfig config) {
  return new ContinuousQueryDispatcher(r -> new RetrievalTask(r, segmentId, config), retrievers,
      initializer).doRetrieve();
}
Project: cineast    File: ContinuousRetrievalLogic.java
public static List<SegmentScoreElement> retrieve(QueryContainer qc, String category,
    ReadableQueryConfig config) {
  TObjectDoubleHashMap<Retriever> retrievers = Config.sharedConfig().getRetriever()
      .getRetrieversByCategory(category);
  if (retrievers.isEmpty()) {
    return new ArrayList<SegmentScoreElement>(0);
  }
  return ContinuousQueryDispatcher.retrieve(qc, retrievers, API.getInitializer(), config);
}
Project: cineast    File: ContinuousRetrievalLogic.java
public static List<SegmentScoreElement> retrieve(String id, String category, ReadableQueryConfig config) {
  TObjectDoubleHashMap<Retriever> retrievers = Config.sharedConfig().getRetriever()
      .getRetrieversByCategory(category);
  if (retrievers.isEmpty()) {
    return new ArrayList<SegmentScoreElement>(0);
  }
  return ContinuousQueryDispatcher.retrieve(id, retrievers, API.getInitializer(), config);
}
Project: rheem    File: SimpleEstimationContext.java
@Override
public SimpleEstimationContext deserialize(JSONObject json, Class<? extends SimpleEstimationContext> cls) {
    final List<CardinalityEstimate> inCards = JsonSerializables.deserializeAllAsList(
            json.getJSONArray("inCards"),
            CardinalityEstimate.class
    );
    final List<CardinalityEstimate> outCards = JsonSerializables.deserializeAllAsList(
            json.getJSONArray("outCards"),
            CardinalityEstimate.class
    );

    final TObjectDoubleHashMap<String> doubleProperties = new TObjectDoubleHashMap<>();
    final JSONObject doublePropertiesJson = json.optJSONObject("properties");
    if (doublePropertiesJson != null) {
        for (String key : doublePropertiesJson.keySet()) {
            doubleProperties.put(key, doublePropertiesJson.getDouble(key));
        }
    }

    final int numExecutions = json.getInt("executions");

    return new SimpleEstimationContext(
            inCards.toArray(new CardinalityEstimate[inCards.size()]),
            outCards.toArray(new CardinalityEstimate[outCards.size()]),
            doubleProperties,
            numExecutions
    );
}
Project: rheem    File: NestableLoadProfileEstimatorTest.java
@Test
public void testFromJuelSpecification() {
    String specification = "{" +
            "\"type\":\"juel\"," +
            "\"in\":2," +
            "\"out\":1," +
            "\"p\":0.8," +
            "\"cpu\":\"${3*in0 + 2*in1 + 7*out0}\"," +
            "\"ram\":\"${6*in0 + 4*in1 + 14*out0}\"," +
            "\"overhead\":143," +
            "\"ru\":\"${rheem:logGrowth(0.1, 0.1, 10000, in0+in1)}\"" +
            "}";
    final NestableLoadProfileEstimator estimator =
            LoadProfileEstimators.createFromSpecification(null, specification);
    final LoadProfile estimate = estimator.estimate(new SimpleEstimationContext(
            new CardinalityEstimate[]{
                    new CardinalityEstimate(10, 10, 1d), new CardinalityEstimate(100, 100, 1d)
            },
            new CardinalityEstimate[]{new CardinalityEstimate(200, 300, 1d)},
            new TObjectDoubleHashMap<>(),
            1
    ));

    Assert.assertEquals(3 * 10 + 2 * 100 + 7 * 200, estimate.getCpuUsage().getLowerEstimate(), 0.01);
    Assert.assertEquals(3 * 10 + 2 * 100 + 7 * 300, estimate.getCpuUsage().getUpperEstimate(), 0.01);
    Assert.assertEquals(
            OptimizationUtils.logisticGrowth(0.1, 0.1, 10000, 100 + 10),
            estimate.getResourceUtilization(),
            0.000000001
    );
    Assert.assertEquals(143, estimate.getOverheadMillis());
}
Project: rheem    File: NestableLoadProfileEstimatorTest.java
@Test
public void testFromMathExSpecification() {
    String specification = "{" +
            "\"type\":\"mathex\"," +
            "\"in\":2," +
            "\"out\":1," +
            "\"p\":0.8," +
            "\"cpu\":\"3*in0 + 2*in1 + 7*out0\"," +
            "\"ram\":\"6*in0 + 4*in1 + 14*out0\"," +
            "\"overhead\":143," +
            "\"ru\":\"logGrowth(0.1, 0.1, 10000, in0+in1)\"" +
            "}";
    final NestableLoadProfileEstimator estimator =
            LoadProfileEstimators.createFromSpecification(null, specification);
    final LoadProfile estimate = estimator.estimate(new SimpleEstimationContext(
            new CardinalityEstimate[]{
                    new CardinalityEstimate(10, 10, 1d), new CardinalityEstimate(100, 100, 1d)
            },
            new CardinalityEstimate[]{new CardinalityEstimate(200, 300, 1d)},
            new TObjectDoubleHashMap<>(),
            1
    ));

    Assert.assertEquals(3 * 10 + 2 * 100 + 7 * 200, estimate.getCpuUsage().getLowerEstimate(), 0.01);
    Assert.assertEquals(3 * 10 + 2 * 100 + 7 * 300, estimate.getCpuUsage().getUpperEstimate(), 0.01);
    Assert.assertEquals(
            OptimizationUtils.logisticGrowth(0.1, 0.1, 10000, 100 + 10),
            estimate.getResourceUtilization(),
            0.000000001
    );
    Assert.assertEquals(143, estimate.getOverheadMillis());
}
Project: rheem    File: NestableLoadProfileEstimatorTest.java
@Test
public void testFromJuelSpecificationWithImport() {
    String specification = "{" +
            "\"in\":2," +
            "\"out\":1," +
            "\"import\":[\"numIterations\"]," +
            "\"p\":0.8," +
            "\"cpu\":\"${(3*in0 + 2*in1 + 7*out0) * numIterations}\"," +
            "\"ram\":\"${6*in0 + 4*in1 + 14*out0}\"," +
            "\"overhead\":143," +
            "\"ru\":\"${rheem:logGrowth(0.1, 0.1, 10000, in0+in1)}\"" +
            "}";
    final NestableLoadProfileEstimator estimator =
            LoadProfileEstimators.createFromSpecification(null, specification);
    SomeExecutionOperator execOp = new SomeExecutionOperator();
    TObjectDoubleMap<String> properties = new TObjectDoubleHashMap<>();
    properties.put("numIterations", 2d);
    final LoadProfile estimate = estimator.estimate(new SimpleEstimationContext(
            new CardinalityEstimate[]{
                    new CardinalityEstimate(10, 10, 1d), new CardinalityEstimate(100, 100, 1d)
            },
            new CardinalityEstimate[]{new CardinalityEstimate(200, 300, 1d)},
            properties,
            1
    ));

    Assert.assertEquals((3 * 10 + 2 * 100 + 7 * 200)  * execOp.getNumIterations(), estimate.getCpuUsage().getLowerEstimate(), 0.01);
    Assert.assertEquals((3 * 10 + 2 * 100 + 7 * 300)  * execOp.getNumIterations(), estimate.getCpuUsage().getUpperEstimate(), 0.01);
    Assert.assertEquals(
            OptimizationUtils.logisticGrowth(0.1, 0.1, 10000, 100 + 10),
            estimate.getResourceUtilization(),
            0.000000001
    );
    Assert.assertEquals(143, estimate.getOverheadMillis());
}
Project: rheem    File: NestableLoadProfileEstimatorTest.java
@Test
public void testMathExFromSpecificationWithImport() {
    String specification = "{" +
            "\"type\":\"mathex\"," +
            "\"in\":2," +
            "\"out\":1," +
            "\"import\":[\"numIterations\"]," +
            "\"p\":0.8," +
            "\"cpu\":\"(3*in0 + 2*in1 + 7*out0) * numIterations\"," +
            "\"ram\":\"6*in0 + 4*in1 + 14*out0\"," +
            "\"overhead\":143," +
            "\"ru\":\"logGrowth(0.1, 0.1, 10000, in0+in1)\"" +
            "}";
    final NestableLoadProfileEstimator estimator =
            LoadProfileEstimators.createFromSpecification(null, specification);
    SomeExecutionOperator execOp = new SomeExecutionOperator();
    TObjectDoubleMap<String> properties = new TObjectDoubleHashMap<>();
    properties.put("numIterations", 2d);
    final LoadProfile estimate = estimator.estimate(new SimpleEstimationContext(
            new CardinalityEstimate[]{
                    new CardinalityEstimate(10, 10, 1d), new CardinalityEstimate(100, 100, 1d)
            },
            new CardinalityEstimate[]{new CardinalityEstimate(200, 300, 1d)},
            properties,
            1
    ));

    Assert.assertEquals((3 * 10 + 2 * 100 + 7 * 200)  * execOp.getNumIterations(), estimate.getCpuUsage().getLowerEstimate(), 0.01);
    Assert.assertEquals((3 * 10 + 2 * 100 + 7 * 300)  * execOp.getNumIterations(), estimate.getCpuUsage().getUpperEstimate(), 0.01);
    Assert.assertEquals(
            OptimizationUtils.logisticGrowth(0.1, 0.1, 10000, 100 + 10),
            estimate.getResourceUtilization(),
            0.000000001
    );
    Assert.assertEquals(143, estimate.getOverheadMillis());
}
Project: HeliosStreams    File: DeltaManager.java
/**
 * Creates a new DeltaManager
 */
private DeltaManager() {
    final int initialDeltaCapacity = ConfigurationHelper.getIntSystemThenEnvProperty(DELTA_CAPACITY, DELTA_CAPACITY_DEFAULT);
    final float initialDeltaLoadFactor = ConfigurationHelper.getFloatSystemThenEnvProperty(DELTA_LOAD_FACTOR, DELTA_LOAD_FACTOR_DEFAULT);
    longDeltas = new TObjectLongHashMap<java.lang.String>(initialDeltaCapacity, initialDeltaLoadFactor, Long.MIN_VALUE);
    doubleDeltas = new TObjectDoubleHashMap<java.lang.String>(initialDeltaCapacity, initialDeltaLoadFactor, Double.MIN_NORMAL);
    intDeltas = new TObjectIntHashMap<java.lang.String>(initialDeltaCapacity, initialDeltaLoadFactor, Integer.MIN_VALUE);
    longVDeltas = new TObjectLongHashMap<java.lang.String>(initialDeltaCapacity, initialDeltaLoadFactor, Long.MIN_VALUE);
    doubleVDeltas = new TObjectDoubleHashMap<java.lang.String>(initialDeltaCapacity, initialDeltaLoadFactor, Double.MIN_NORMAL);
    intVDeltas = new TObjectIntHashMap<java.lang.String>(initialDeltaCapacity, initialDeltaLoadFactor, Integer.MIN_VALUE);      
    JMXHelper.registerMBean(OBJECT_NAME, this);     
}
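Each map above is constructed with an out-of-band sentinel (Double.MIN_NORMAL, Long.MIN_VALUE, Integer.MIN_VALUE) as its no-entry value, so a single put() can both record the new sample and report whether a previous one existed. A minimal sketch of that delta pattern (the helper below is illustrative, not the DeltaManager API):

TObjectDoubleHashMap<String> previous =
        new TObjectDoubleHashMap<>(64, 0.75f, Double.MIN_NORMAL); // sentinel no real sample uses

// returns null for the first sample of a key, the delta afterwards
Double delta(String key, double sample) {
    double prior = previous.put(key, sample); // put() returns the old value, or the no-entry value
    return prior == previous.getNoEntryValue() ? null : sample - prior;
}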
Project: demidovii / galago-git    File: FieldRelevanceModelTraversal.java
private Node createTermFieldNodes(String term, TObjectDoubleHashMap<String> weights) {
  int i = 0;
  ArrayList<Node> termFields = new ArrayList<Node>();
  NodeParameters np = new NodeParameters();

  for (String field : fields) {
    String partName = "field." + field;
    NodeParameters par1 = new NodeParameters();
    par1.set("default", term);
    par1.set("part", partName);
    Node termCount = new Node("counts", par1, new ArrayList<Node>(), 0);
    double weight = weights.get(field);
    weight = (weight > 0.0) ? weight : FieldLanguageModel.smoothing;
    if (retrieval.getGlobalParameters().get("printWeights", false)) {
      if (weight > FieldLanguageModel.smoothing) {
        System.err.printf("%s\t%s\t%f\n", term, field, weight);
      }
    }
    np.set(Integer.toString(i), weight);
    Node termScore = new Node("feature", scorerType + "-raw");
    termScore.getNodeParameters().set("lengths", field);
    termScore.addChild(termCount);
    termFields.add(termScore);
    i++;
  }
  Node termFieldNodes = new Node("combine", np, termFields, 0);
  Node logScoreNode = new Node("feature", "log");
  logScoreNode.addChild(termFieldNodes);
  return logScoreNode;
}
Project: demidovii    File: RetrievalModelInstance.java
public RetrievalModelInstance(RetrievalModelParameters params, Parameters settings) {
    this.params = params;
    this.settings = new TObjectDoubleHashMap<String>();
    this.outParams = Parameters.create();

    for (String p : params.getParams()) {
        unsafeSet(p, settings.getDouble(p));
    }
//  System.err.println("Created: " + toString());
    normalize();
//  System.err.println("Normal: " + toString());
}
Project: demidovii    File: RetrievalModelInstance.java
@Override
public RetrievalModelInstance clone() {
  RetrievalModelInstance lpi = new RetrievalModelInstance();
  lpi.params = this.params;
  lpi.settings = new TObjectDoubleHashMap<String>(this.settings);
  lpi.outParams = outParams.clone();
  return lpi;
}
Project: demidovii    File: NodeParameters.java
public NodeParameters set(String key, double value) {
  ensureKeyType(key, Type.DOUBLE);
  if (doubleMap == null) {
    doubleMap = new TObjectDoubleHashMap<>();
  }
  doubleMap.put(key, value);
  return this;
}
Project: smt-ibm-model    File: IBMModelAbstract.java
protected void initCountT() {
    if (countT == null) {
        countT = new TObjectDoubleHashMap<WordPair>();
    } else {
        Set<WordPair> keySet = countT.keySet();

        for (WordPair ef : keySet) {
            countT.put(ef, 0.0);
        }
    }
}
Project: smt-ibm-model    File: IBMModelAbstract.java
protected void initTransProbs() {
    t = new TObjectDoubleHashMap<WordPair>();

    // uniform distribution
    double value = 1 / (double) enDict.size();

    for (SentencePair p : sentPairs) {
        for (int j = 1; j <= p.getE().length(); j++) {
            for (int i = iStart; i <= p.getF().length(); i++) {
                WordPair ef = p.getWordPair(j, i);
                t.put(ef, value);
            }
        }
    }
}
Project: smt-ibm-model    File: IBMModel3.java
private void initCountN() {
    if (countN == null) {
        countN = new TObjectDoubleHashMap<FertWord>();
    } else {
        Set<FertWord> keySet = countN.keySet();

        for (FertWord fw : keySet) {
            countN.put(fw, 0.0);
        }
    }
}
Project: java-belief-propagation    File: Node.java
public TObjectDoubleMap<STATES> getStateToLogPriorProbabilityAndProductIncomingMessages() {
    TObjectDoubleMap<STATES> stateToLogPriorProbabilityAndProductIncomingMessages = new TObjectDoubleHashMap<>();
    for (STATES state : this.getStates()) {
        double logProductOfIncomingMessages = 0;
        for (Edge<?, ?> edge : this.edges) {
            logProductOfIncomingMessages += edge.getLogIncomingMessage(this, state);
        }
        stateToLogPriorProbabilityAndProductIncomingMessages.put(state,
                this.getLogPriorProbablility(state) + logProductOfIncomingMessages);
    }
    return stateToLogPriorProbabilityAndProductIncomingMessages;
}
Project: ecir2015timebooks    File: CompareRuns.java
public static TObjectDoubleHashMap<String> toQidScore(List<Result> results) {
  TObjectDoubleHashMap<String> qdata = new TObjectDoubleHashMap<String>();
  qdata.ensureCapacity(results.size());
  for(Result rs : results) {
    qdata.put(rs.qid, rs.score);
  }
  return qdata;
}
Project: chai    File: HashMapLanguageModel.java
public void increment(TObjectDoubleHashMap<String> counts) {
  counts.forEachEntry((key, val) -> {
    length += val;
    // Merge into the model's own map rather than the argument: writing to the
    // parameter would mutate the map while iterating it and double its values.
    // (Assumes the backing field is also named counts and is shadowed here.)
    this.counts.adjustOrPutValue(key, val, val);
    return true;
  });
}
Project: chai    File: TopKHeap.java
public static <T> List<Weighted<T>> takeTop(int k, TObjectDoubleHashMap<T> objects) {
  TopKHeap<Weighted<T>> top = new TopKHeap<>(k);
  objects.forEachEntry((obj, count) -> {
    top.offer(new Weighted<>(count, obj));
    return true;
  });
  return top.getSorted();
}
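A usage sketch for takeTop (the term frequencies are invented): the heap keeps only k candidates while streaming over the map, so selecting the top k costs O(n log k) rather than a full sort.

TObjectDoubleHashMap<String> tf = new TObjectDoubleHashMap<>();
tf.put("the", 120.0);
tf.put("cat", 3.0);
tf.put("sat", 2.0);
List<Weighted<String>> top2 = TopKHeap.takeTop(2, tf); // the two highest-weighted entries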