Python scipy.misc module: logsumexp() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use scipy.misc.logsumexp().
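For context, logsumexp(a) computes log(sum(exp(a))) while avoiding the overflow and underflow that the naive formula suffers from. A minimal sketch of why that matters (note that in modern SciPy the function lives in scipy.special; scipy.misc.logsumexp, used by the older projects below, was deprecated and later removed):

import numpy as np
from scipy.special import logsumexp  # formerly scipy.misc.logsumexp

a = np.array([1000.0, 1000.5, 999.0])
print(np.log(np.sum(np.exp(a))))  # naive formula overflows: inf
print(logsumexp(a))               # stable: ~1001.10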

Project: pyISC | Author: STREAM3
def predict_log_proba(self,X):
        assert self.class_column > -1

        X1 = None
        if isinstance(X, pyisc.DataObject):
            assert X.class_column == self.class_column
            X1 = X.as_2d_array()
        elif isinstance(X, ndarray):
            X1 = X.copy()


        if X1 is not None:

            logps = self.compute_logp(X1)

            LogPs = [x - logsumexp(x) for x in array(logps).T]  # normalized

            return array(LogPs)
        else:
            raise ValueError("Unknown type of data to score:", type(X))
Project: mitre | Author: gerberlab
def flat_prior(self):
        """ Evaluate log-probability of each primitive in the population.

        Return value is properly normalized.

        In this base class, we implement a version where the 
        distribution over primitives is static; subclasses will 
        reevaluate this at each call based on the values of variables and 
        parameters.

        """

        raw_weights = np.zeros(len(self.rule_population))
        normalization = logsumexp(raw_weights)
        return raw_weights - normalization
Project: zhusuan | Author: thu-ml
def test_value(self):
        with self.test_session(use_gpu=True):
            def _test_value(logits, n_experiments, given):
                logits = np.array(logits, np.float32)
                normalized_logits = logits - misc.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given)
                dist = Multinomial(logits, n_experiments)
                log_p = dist.log_prob(given)
                target_log_p = np.log(misc.factorial(n_experiments)) - \
                    np.sum(np.log(misc.factorial(given)), -1) + \
                    np.sum(given * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = dist.prob(given)
                target_p = np.exp(target_log_p)
                self.assertAllClose(p.eval(), target_p)

            _test_value([-50., -20., 0.], 4, [1, 0, 3])
            _test_value([1., 10., 1000.], 1, [1, 0, 0])
            _test_value([[2., 3., 1.], [5., 7., 4.]], 3,
                        np.ones([3, 1, 3], dtype=np.int32))
            _test_value([-10., 10., 20., 50.], 100, [[0, 1, 99, 100],
                                                     [100, 99, 1, 0]])
Project: bolero | Author: rock-learning
def logsumexp(a, axis=None, b=None):
        a = np.asarray(a)
        if axis is None:
            a = a.ravel()
        else:
            a = np.rollaxis(a, axis)
        a_max = a.max(axis=0)
        if b is not None:
            b = np.asarray(b)
            if axis is None:
                b = b.ravel()
            else:
                b = np.rollaxis(b, axis)
            out = np.log(np.sum(b * np.exp(a - a_max), axis=0))
        else:
            out = np.log(np.sum(np.exp(a - a_max), axis=0))
        out += a_max
        return out
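A quick sanity check of this fallback against the naive weighted formula (a sketch; it assumes the logsumexp defined above is in scope and uses well-scaled inputs so the naive formula does not overflow):

import numpy as np

a = np.array([[0.1, 2.0], [3.0, -1.0]])
b = np.array([[0.5, 0.5], [2.0, 1.0]])
naive = np.log(np.sum(b * np.exp(a), axis=0))
assert np.allclose(logsumexp(a, axis=0, b=b), naive)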
Project: hydrus | Author: mark-r-g
def ests_ll_quad(self, params):
        """
        Calculate the loglikelihood given model parameters `params`.

        This method uses Gaussian quadrature, and thus returns an *approximate*
        integral.
        """
        mu0, gamma0, err0 = np.split(params, 3)
        x = np.tile(self.z, (self.cfg.QCOUNT, 1, 1))  # (QCOUNT x nhosp x nmeas)
        loc = mu0 + np.outer(QC1, gamma0)
        loc = np.tile(loc, (self.n, 1, 1))
        loc = np.transpose(loc, (1, 0, 2))
        scale = np.tile(err0, (self.cfg.QCOUNT, self.n, 1))
        zs = lpdf_3d(x=x, loc=loc, scale=scale)

        w2 = np.tile(self.w, (self.cfg.QCOUNT, 1, 1))
        wted = np.nansum(w2 * zs, axis=2).T  # (nhosp X QCOUNT)
        qh = np.tile(QC1, (self.n, 1))  # (nhosp X QCOUNT)
        combined = wted + norm.logpdf(qh)  # (nhosp X QCOUNT)

        return logsumexp(np.nan_to_num(combined), b=QC2, axis=1)  # (nhosp)
Project: factorix | Author: gbouchar
def log_softmax(vec):
    """Robust version of the log of softmax values

    Args:
        vec: vector of log-odds

    Returns:
        A vector of log-softmax values log(exp(x_k)/sum_i(exp(x_i))) whose exponentials sum to one

    Examples:
        >>> print(log_softmax(np.array([1.0, 1.0, 1.0, 1.0])))
        [-1.38629436 -1.38629436 -1.38629436 -1.38629436]
        >>> print(log_softmax(np.array([-1.0, -1.0, -1.0, -1.0])))
        [-1.38629436 -1.38629436 -1.38629436 -1.38629436]
        >>> print(log_softmax(np.array([1.0, 0.0, -1.0, 1.1])))
        [-0.9587315 -1.9587315 -2.9587315 -0.8587315]

    """
    return vec - logsumexp(vec)
Project: factorix | Author: gbouchar
def log_softmax(vec):
    """Robust version of the log of softmax values

    Args:
        vec: vector of log-odds

    Returns:
        A vector of log-softmax values log(exp(x_k)/sum_i(exp(x_i))) whose exponentials sum to one

    Examples:
        >>> print(log_softmax(np.array([1.0, 1.0, 1.0, 1.0])))
        [-1.38629436 -1.38629436 -1.38629436 -1.38629436]
        >>> print(log_softmax(np.array([-1.0, -1.0, -1.0, -1.0])))
        [-1.38629436 -1.38629436 -1.38629436 -1.38629436]
        >>> print(log_softmax(np.array([1.0, 0.0, -1.0, 1.1])))
        [-0.9587315 -1.9587315 -2.9587315 -0.8587315]

    """
    return vec - logsumexp(vec)
Project: NetPower_TestBed | Author: Vignesh2208
def log_normalize(a, axis=None):
    """Normalizes the input array so that the exponent of the sum is 1.

    Parameters
    ----------
    a : array
        Non-normalized input data.

    axis : int
        Dimension along which normalization is performed.

    Notes
    -----
    Modifies the input **inplace**.
    """
    a_lse = logsumexp(a, axis)
    a -= a_lse[:, np.newaxis]
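A short usage sketch (an assumption on my part, not project code; note that log_normalize returns None and modifies a in place, and as written it expects a 2-D array normalized along axis 1):

import numpy as np

a = np.log(np.array([[1.0, 3.0], [2.0, 2.0]]))  # unnormalized log probabilities
log_normalize(a, axis=1)
print(np.exp(a).sum(axis=1))  # -> [1. 1.]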
Project: pynamd | Author: radakb
def _uwham_obj_grad(self, f_i):
        """Return the log-likelihood (scalar objective function) and its
        gradient (wrt the free energies) for a given value of the free
        energies.  Note that this expects one less free energy than there are
        states, since we always solve under the constraint that f_1 = 0.
        """
        _f_i = hstack((zeros(1), asarray(f_i)))
        # For numerical stability, use log quantities.
        logQ_nj = _f_i - self._u_nj_m
        logNorm_n = logsumexp(logQ_nj, 1, self.PIsdiag[newaxis, :])
        W_nj = exp(logQ_nj - logNorm_n[:, newaxis])
        # Compute matrix products and sums (equivalent to multiplying by
        # appropriate vector of ones). Note that using dot() with ndarrays is
        # _much_ faster than multiplying matrix objects.
        PIW = self.PIs.dot(W_nj.T)
        WPI = W_nj.dot(self.PIs)
        g = PIW.mean(axis=1) # used in gradient and Hessian computation
        kappa = logNorm_n.mean() - (self.PIsdiag.dot(_f_i)).sum()
        grad = (g - self.PIsdiag)[1:]
        self._hess = (diagflat(g) - PIW.dot(WPI) / self.total_samples)[1:, 1:]
        return kappa, grad
Project: pymake | Author: dtrckd
def recursive_line(self, new_line=5246):
        stir = self.load()
        J = stir.shape[0]
        K = stir.shape[1]
        for x in range(new_line):
            n = J + x
            new_l =  np.ones((1, K)) * np.inf
            print(n)
            for m in range(1,K):
                if m > n:
                    continue
                elif m == n:
                    new_l[0, m] = 0
                elif m == 1:
                    new_l[0, 1] = logsumexp( [  np.log(n-1) + stir[n-1, m] ] )
                else:
                    new_l[0, m] = logsumexp( [ stir[n-1, m-1] , np.log(n-1) + stir[n-1, m] ] )
            stir = np.vstack((stir, new_l))

        #np.save('stirling.npy', stir)
        #np.load('stirling.npy')
        return stir
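The recurrence here (and in recursive_row below) is that of the unsigned Stirling numbers of the first kind, c(n, m) = c(n-1, m-1) + (n-1) * c(n-1, m), evaluated in log space. A small self-contained sanity check of the log-space form (a sketch, independent of the class above):

import numpy as np
from scipy.special import logsumexp  # formerly scipy.misc.logsumexp

N = 5
log_c = np.full((N, N), -np.inf)  # log c(n, m); -inf encodes c = 0
log_c[0, 0] = 0.0
with np.errstate(divide='ignore'):  # np.log(0) -> -inf is intended here
    for n in range(1, N):
        for m in range(1, n + 1):
            log_c[n, m] = logsumexp(
                [log_c[n - 1, m - 1], np.log(n - 1) + log_c[n - 1, m]])
print(np.exp(log_c[4, 2]))  # -> 11.0, since c(4, 2) = 11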
Project: pymake | Author: dtrckd
def recursive_row(self, new_row=''):
        stir = np.load('stirling.npy')
        J = stir.shape[0]
        K = stir.shape[1]
        x = 0
        while stir.shape[0] != stir.shape[1]:
            m = K + x
            new_c =  np.ones((J, 1)) * np.inf
            stir = np.hstack((stir, new_c))
            print(m)
            for n in range(K,J):
                if m > n:
                    continue
                elif m == n:
                    stir[n, m] = 0

                else:
                    stir[n,m] = logsumexp( [ stir[n-1, m-1] , np.log(n-1) + stir[n-1, m] ] )
            x += 1

        #np.save('stirling.npy', stir)
        #np.load('stirling.npy',)
        return stir
Project: CNValloc | Author: m1m0r1
def _calc_gamma_psi(log_d, alpha, log_beta, gamma, log_psi0):
    log_psi = log_psi0
    count = 0
    #print ((np.exp(log_psi1 - log_psi) ** 2).sum())

    while count == 0 \
        or ((np.exp(log_psi1) - np.exp(log_psi)) ** 2).sum() > 0.001 \
        or ((gamma1 - gamma) ** 2).sum() > 0.001:
        #print ('gamma psi:', count, ((np.exp(log_psi1) - np.exp(log_psi)) ** 2).sum())
        log_psi1 = log_psi
        gamma1 = gamma

        psi_offset = (digamma(gamma))[:, np.newaxis, np.newaxis, :]

        log_psi = log_beta[np.newaxis, :, :, :] + psi_offset
        log_psi = log_normalize(log_psi, axis=3)
        gamma = np.exp(logsumexp(logsumexp(log_d[:, :, :, np.newaxis] + log_psi, axis=1), axis=1)) + alpha[np.newaxis, :]
        count += 1

    #log_psi = np.average([log_psi0, log_psi], axis=0, weights=[0.9, 0.1])   # weak learning
    return (gamma, log_psi)
Project: CNValloc | Author: m1m0r1
def _calc_alpha_beta(log_d, alpha0, log_beta0, gamma, log_psi):
    log_beta = logsumexp(log_psi + log_d[:, :, :, np.newaxis], axis=0)
    log_beta = log_normalize(log_beta, axis=1)

    log_smooth = np.log(10)
    alpha = alpha0
    N = gamma.shape[0]
    zero = 1e-30

    gamma_digamma_sum = digamma(gamma.sum(axis=1))[:, np.newaxis]
    g_offset = (digamma(gamma) - gamma_digamma_sum).sum(axis=0) / N
    # Newton-style multiplicative update for alpha, computed in log space.
    # NB: as written, this helper is never called, so `alpha0` is returned unchanged.
    def next_alpha(alpha):
        das = digamma(alpha.sum())
        g = alpha * N * (das - digamma(alpha) + g_offset)
        h = alpha * N * (das + g_offset)
        z = N * das
        x = (alpha * g / h).sum()
        w = (alpha ** 2 / h).sum()
        return np.exp(np.log(alpha) - (g - x * alpha / (1/z + w)) / h)

    return (alpha, log_beta)
Project: CNValloc | Author: m1m0r1
def as_dict(self):
        """ Returns json encodable dictionary
        """
        haps = [[self._data.alphabets[np.argmax(site_beta)] for site_beta in beta_sites.transpose()] for beta_sites in self.log_beta]
        hap_probs = [[np.exp(np.max(site_beta) - logsumexp(site_beta)) for site_beta in beta_sites.transpose()] for beta_sites in self.log_beta]

        return {
            'nsites': self._data.nsites,
            'sample_ids': self._data.sample_ids,
            'nhaps': self.nhaps,
            'alphabets': list(self._data.alphabets),
            'step': self.step,
            'log_likelihood': self.ll,
            'alpha': list(map(float, self.alpha)),
            'log_beta': [[list(map(float, site_beta)) for site_beta in beta_sites.transpose()] for beta_sites in self.log_beta],
            'haps': haps,
            'hap_probs': hap_probs,
            'gamma': [list(map(float, vec)) for vec in self.gamma],
        }
Project: spherecluster | Author: clara-labs
def _log_likelihood(X, centers, weights, concentrations):
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))

    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    if n_features <= 50:  # works up to about 50 dimensions before becoming numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    posterior = np.zeros((n_clusters, n_examples))
    weights_log = np.log(weights)
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(
                posterior[:, ee] - logsumexp(posterior[:, ee]))

    return posterior
Project: megamix | Author: 14thibea
def _step_E(self, points):
        """
        In this step the algorithm evaluates the responsibilities of each point in each cluster.

        Parameters
        ----------
        points : an array (n_points,dim)

        Returns
        -------
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points

        """
        log_normal_matrix = _log_normal_matrix(points,self.means,self.cov_chol,
                                               self.covariance_type,self.n_jobs)
        log_product = log_normal_matrix + self.log_weights
        log_prob_norm = logsumexp(log_product,axis=1)

        log_resp = log_product - log_prob_norm[:,np.newaxis]

        return log_prob_norm,log_resp
Project: megamix | Author: 14thibea
def _step_E(self, points):
        """
        In this step the algorithm evaluates the responsibilities of each point in each cluster.

        Parameters
        ----------
        points : an array (n_points,dim)

        Returns
        -------
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points

        """
        log_normal_matrix = _log_normal_matrix(points,self.means,self.cov,self.covariance_type,self.n_jobs)
        log_product = log_normal_matrix + self.log_weights[:,np.newaxis].T
        log_prob_norm = logsumexp(log_product,axis=1)

        log_resp = log_product - log_prob_norm[:,np.newaxis]

        return log_prob_norm,log_resp
Project: relax | Author: duvenaud
def partial_eval(self, X, n_samples=5):
    if n_samples < 1000:
      res, iwae = self.sess.run(
          (self.lHat, self.iwae),
          feed_dict={self.x: X, self.n_samples: n_samples})
      res = [iwae] + res
    else:  # special case to handle OOM
      assert n_samples % 100 == 0, "When using large # of samples, it must be divisible by 100"
      res = []
      for i in xrange(int(n_samples/100)):
        logF, = self.sess.run(
            (self.logF,),
            feed_dict={self.x: X, self.n_samples: 100})
        res.append(logsumexp(logF, axis=1))
      res = [np.mean(logsumexp(res, axis=0) - np.log(n_samples))]
    return res


  # Random samplers
Project: SEVDL_MGP | Author: AMLab-Amsterdam
def loglike(nn, sample_preds, y):
    """Return the Avg. Test Log-Likelihood
    """
    if y.shape[1] == 1:
        y = y.ravel()
    sample_ll = np.zeros((sample_preds.shape[1], sample_preds.shape[0]))
    a, b = np.exp(nn.extra_inf['a1'].get_value()), np.exp(nn.extra_inf['b1'].get_value())
    etau, elogtau = (a / b).astype(np.float32), (psi(a) - np.log(b)).astype(np.float32)
    for sample in xrange(sample_preds.shape[0]):
        ypred = sample_preds[sample].astype(np.float32)
        if len(y.shape) > 1:
            sll = -.5 * np.sum(etau * (y - ypred)**2, axis=1)
        else:
            sll = -.5 * etau * (y - ypred)**2
        sample_ll[:, sample] = sll

    return np.mean(logsumexp(sample_ll, axis=1) - np.log(sample_preds.shape[0]) - .5 * np.log(2*np.pi) + .5 * elogtau.sum())
Project: treecat | Author: posterior
def logprob(self, data):
        logprobs = np.stack(
            [server.logprob(data) for server in self._ensemble])
        logprobs = logsumexp(logprobs, axis=0)
        logprobs -= np.log(len(self._ensemble))
        assert logprobs.shape == (data.shape[0], )
        return logprobs
Project: cellranger | Author: 10XGenomics
def nb_exact_test(x_a, x_b, size_factor_a, size_factor_b, mu, phi):
    """ Compute p-value for a pairwise exact test using the negative binomial.
    Args:
      x_a (int) - Total count for a single gene in group A
      x_b (int) - Total count for a single gene in group B
      size_factor_a (float) - Sum of size factors for group A
      size_factor_b (float) - Sum of size factors for group B
      mu (float) - Common mean count for this gene
      phi (float) - Common dispersion for this gene
    Returns:
      p-value (float); the probability that a random pair of counts under the null hypothesis is more extreme
      than the observed pair of counts. """
    size_factor_a = float(size_factor_a)
    size_factor_b = float(size_factor_b)
    mu = float(mu)
    phi = float(phi)

    if (x_a + x_b) == 0:
        return 1.0
    if phi == 0:
        return 1.0
    if size_factor_a == 0 or size_factor_b == 0:
        return 1.0

    all_x_a = np.arange(0, 1+x_a+x_b, 1)
    all_x_b = np.arange(x_a+x_b, -1, -1)

    def log_prob(x, size_factor):
        return neg_bin_log_pmf(x, size_factor*mu, phi/size_factor)

    log_p_obs = log_prob(x_a, size_factor_a) + log_prob(x_b, size_factor_b)
    log_p_all = log_prob(all_x_a, size_factor_a) + log_prob(all_x_b, size_factor_b)

    more_extreme = log_p_all <= log_p_obs
    if np.sum(more_extreme) == 0:
        return 0.0

    return np.exp(logsumexp(log_p_all[log_p_all <= log_p_obs]) - logsumexp(log_p_all))
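The helper neg_bin_log_pmf used above is defined elsewhere in the cellranger codebase and is not shown here. A plausible stand-in (an assumption, not the project's actual code) parameterizes the negative binomial by mean mu and dispersion phi, so that the variance is mu + phi * mu**2:

import numpy as np
from scipy.stats import nbinom

def neg_bin_log_pmf(x, mu, phi):
    # Hypothetical helper: negative binomial log-pmf with mean mu and
    # dispersion phi (variance = mu + phi * mu**2); phi > 0 is assumed,
    # since the caller above returns early when phi == 0.
    n = 1.0 / phi         # NB "size" (number of failures)
    p = n / (n + mu)      # NB success probability
    return nbinom.logpmf(x, n, p)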
Project: mitre | Author: gerberlab
def flat_prior(self, state):
        """ Evaluate log-probability of each primitive in the population.

        Return value is properly normalized.

        """
        raw_phylogeny_weights = self.rule_population.flat_variable_weights 
        phylogeny_weights = scipy.stats.norm.logpdf(
            np.log(raw_phylogeny_weights),
            loc = state.phylogeny_mean,
            scale = state.phylogeny_std
        )

        total_duration = float(self.data.experiment_end - self.data.experiment_start)
        durations = (self.rule_population.flat_durations /
                     ((1.0+self.window_duration_epsilon)*total_duration)
                    )
        window_a = (
            state.window_concentration *
            state.window_typical_fraction
        )
        window_b = (
            state.window_concentration *
            (1.0-state.window_typical_fraction)
        )
        window_weights = scipy.stats.beta.logpdf(
            durations, 
            window_a,
            window_b
        )

        weights = phylogeny_weights + window_weights
        normalization = logsumexp(weights)
        return weights - normalization
Project: mitre | Author: gerberlab
def flat_prior(self, state):
        """ Evaluate log-probability of each primitive in the population.

        Return value is properly normalized.

        This subclass ignores phylogenetic weights.

        """
        total_duration = float(self.data.experiment_end - self.data.experiment_start)
        durations = (self.rule_population.flat_durations /
                     ((1.0+self.window_duration_epsilon)*total_duration)
                    )
        window_a = (
            state.window_concentration *
            state.window_typical_fraction
        )
        window_b = (
            state.window_concentration *
            (1.0-state.window_typical_fraction)
        )
        window_weights = scipy.stats.beta.logpdf(
            durations, 
            window_a,
            window_b
        )

        weights = window_weights
        normalization = logsumexp(weights)
        return weights - normalization
Project: musm-adt17 | Author: stefanoteso
def _utils_to_pvals(self, utils):
        return np.exp(utils - logsumexp(self.noise * utils))
Project: magenta | Author: tensorflow
def reward_from_reward_rnn_scores(self, action, reward_scores):
    """Rewards based on probabilities learned from data by trained RNN.

    Computes the reward_network's learned softmax probabilities. When used as
    rewards, allows the model to maintain information it learned from data.

    Args:
      action: A one-hot encoding of the chosen action.
      reward_scores: The value for each note output by the reward_rnn.
    Returns:
      Float reward value.
    """
    action_note = np.argmax(action)
    normalization_constant = logsumexp(reward_scores)
    return reward_scores[action_note] - normalization_constant
Project: paragraph2vec | Author: thunlp
def bound(self, corpus, gamma=None, subsample_ratio=1.0):
        """
        Estimate the variational bound of documents from `corpus`:
        E_q[log p(corpus)] - E_q[log q(corpus)]

        `gamma` are the variational parameters on topic weights for each `corpus`
        document (=2d matrix=what comes out of `inference()`).
        If not supplied, will be inferred from the model.

        """
        score = 0.0
        _lambda = self.state.get_lambda()
        Elogbeta = dirichlet_expectation(_lambda)

        for d, doc in enumerate(corpus):  # stream the input doc-by-doc, in case it's too large to fit in RAM
            if d % self.chunksize == 0:
                logger.debug("bound: at document #%i" % d)
            if gamma is None:
                gammad, _ = self.inference([doc])
            else:
                gammad = gamma[d]
            Elogthetad = dirichlet_expectation(gammad)

            # E[log p(doc | theta, beta)]
            score += numpy.sum(cnt * logsumexp(Elogthetad + Elogbeta[:, id]) for id, cnt in doc)

            # E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector
            score += numpy.sum((self.alpha - gammad) * Elogthetad)
            score += numpy.sum(gammaln(gammad) - gammaln(self.alpha))
            score += gammaln(numpy.sum(self.alpha)) - gammaln(numpy.sum(gammad))

        # compensate likelihood for when `corpus` above is only a sample of the whole corpus
        score *= subsample_ratio

        # E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar
        score += numpy.sum((self.eta - _lambda) * Elogbeta)
        score += numpy.sum(gammaln(_lambda) - gammaln(self.eta))
        score += numpy.sum(gammaln(self.eta * self.num_terms) - gammaln(numpy.sum(_lambda, 1)))
        return score
Project: zhusuan | Author: thu-ml
def test_log_sum_exp(self):
        with self.test_session(use_gpu=True) as sess:
            a = np.array([[[1., 3., 0.2], [0.7, 2., 1e-6]],
                          [[0., 1e6, 1.], [1., 1., 1.]]])
            for keepdims in [True, False]:
                true_values = misc.logsumexp(a, (0, 2), keepdims=keepdims)
                test_values = sess.run(log_sum_exp(
                    tf.constant(a), (0, 2), keepdims))
                self.assertAllClose(test_values, true_values)
Project: zhusuan | Author: thu-ml
def test_log_mean_exp(self):
        with self.test_session(use_gpu=True) as sess:
            a = np.array([[[1., 3., 0.2], [0.7, 2., 1e-6]],
                          [[0., 1e6, 1.], [1., 1., 1.]]])
            for keepdims in [True, False]:
                true_values = misc.logsumexp(a, (0, 2), keepdims=keepdims) - \
                              np.log(a.shape[0] * a.shape[2])
                test_values = sess.run(log_mean_exp(
                    tf.constant(a), (0, 2), keepdims))
                self.assertAllClose(test_values, true_values)

            b = np.array([[0., 1e-6, 10.1]])
            test_values = sess.run(log_mean_exp(b, 0, keep_dims=False))
            self.assertTrue(np.abs(test_values - b).max() < 1e-6)
Project: zhusuan | Author: thu-ml
def test_value(self):
        with self.test_session(use_gpu=True):
            def _test_value(logits, given):
                logits = np.array(logits, np.float32)
                normalized_logits = logits - misc.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given, np.int32)
                cat = Categorical(logits)
                log_p = cat.log_prob(given)

                def _one_hot(x, depth):
                    n_elements = x.size
                    ret = np.zeros((n_elements, depth))
                    ret[np.arange(n_elements), x.flat] = 1
                    return ret.reshape(list(x.shape) + [depth])

                target_log_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = cat.prob(given)
                target_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * np.exp(normalized_logits), -1)
                self.assertAllClose(p.eval(), target_p)

            _test_value([0.], [0, 0, 0])
            _test_value([-50., -10., -50.], [0, 1, 2, 1])
            _test_value([0., 4.], [[0, 1], [0, 1]])
            _test_value([[2., 3., 1.], [5., 7., 4.]],
                        np.ones([3, 1, 1], dtype=np.int32))
Project: zhusuan | Author: thu-ml
def test_value(self):
        with self.test_session(use_gpu=True):
            def _test_value(logits, given):
                logits = np.array(logits, np.float32)
                normalized_logits = logits - misc.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given, np.int32)
                cat = OnehotCategorical(logits)
                log_p = cat.log_prob(tf.one_hot(given, logits.shape[-1],
                                                dtype=tf.int32))

                def _one_hot(x, depth):
                    n_elements = x.size
                    ret = np.zeros((n_elements, depth))
                    ret[np.arange(n_elements), x.flat] = 1
                    return ret.reshape(list(x.shape) + [depth])

                target_log_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = cat.prob(tf.one_hot(given, logits.shape[-1],
                                        dtype=tf.int32))
                target_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * np.exp(normalized_logits), -1)
                self.assertAllClose(p.eval(), target_p)

            _test_value([0.], [0, 0, 0])
            _test_value([-50., -10., -50.], [0, 1, 2, 1])
            _test_value([0., 4.], [[0, 1], [0, 1]])
            _test_value([[2., 3., 1.], [5., 7., 4.]],
                        np.ones([3, 1, 1], dtype=np.int32))
Project: geepee | Author: thangbui
def compute_error(y, m, v, lik, median=False, no_samples=50):
    if lik == 'gauss':
        y = y.reshape((y.shape[0],))
        if median:
            rmse = np.sqrt(np.median((y - m)**2))
        else:
            rmse = np.sqrt(np.mean((y - m)**2))
        return rmse
    elif lik == 'cdf':
        K = no_samples
        fs = draw_randn_samples(K, m, v).T
        log_factor = stats.norm.logcdf(np.tile(y.reshape((y.shape[0], 1)), (1, K)) * fs)
        ll = logsumexp(log_factor - np.log(K), 1)
        return 1 - np.mean(ll > np.log(0.5))
Project: probabilistic2020 | Author: KarchinLab
def log_shannon_entropy(log_p):
    """Calculates the log of the Shannon entropy.

    Parameters
    ----------
    log_p : np.array
        array of log probabilities

    Returns
    -------
    log of the Shannon entropy (in nats)
    """
    out = logsumexp(log_p + np.log(-log_p))
    return out
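A quick numerical check of the log-space identity log(H) = logsumexp(log_p + log(-log_p)) for the natural-log entropy H = -sum(p * log(p)) (a sketch):

import numpy as np
p = np.array([0.5, 0.25, 0.25])
H = -(p * np.log(p)).sum()  # 1.0397... nats
print(np.isclose(log_shannon_entropy(np.log(p)), np.log(H)))  # -> True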
Project: pyprob | Author: probprog
def __init__(self, values, log_weights=None):
        self.length = len(values)
        if log_weights is None:
            log_weights = np.full(len(values), -math.log(len(values))) # assume uniform
        else:
            log_weights = np.array(log_weights)
        weights = np.exp(log_weights - logsumexp(log_weights))
        self.distribution = collections.defaultdict(float)
        for i in range(self.length):
            self.distribution[values[i]] += weights[i]
        self.distribution = collections.OrderedDict(sorted(self.distribution.items()))
        self.values = np.array(list(self.distribution.keys()))
        self.weights = np.array(list(self.distribution.values()))
Project: senti | Author: stevenxxiu
def predict_proba(self, docs):
        scores = np.hstack(cur_model.score(docs).reshape(-1, 1) for cur_model in self.models)
        if self.class_priors:
            scores += self.class_scores
        return np.exp(scores - logsumexp(scores, axis=1, keepdims=True))
Project: NetPower_TestBed | Author: Vignesh2208
def _compute_log_likelihood(self, X):
        n_samples, _ = X.shape
        res = np.zeros((n_samples, self.n_components))

        for i in range(self.n_components):
            log_denses = self._compute_log_weighted_gaussian_densities(X, i)
            res[:, i] = logsumexp(log_denses, axis=1)

        return res
Project: NetPower_TestBed | Author: Vignesh2208
def _do_forward_pass(self, framelogprob):
        n_samples, n_components = framelogprob.shape
        fwdlattice = np.zeros((n_samples, n_components))
        _hmmc._forward(n_samples, n_components,
                       log_mask_zero(self.startprob_),
                       log_mask_zero(self.transmat_),
                       framelogprob, fwdlattice)
        return logsumexp(fwdlattice[-1]), fwdlattice
Project: pynamd | Author: radakb
def _W_nj(self):
        """The sample sorted sample weights in all states.

        FOR INTERNAL USE ONLY!
        """
        m = self.nstates_sampled
        logQ_nj = self._f[newaxis, :] - self._u_nj
        logNorm_n = logsumexp(logQ_nj[:, :m], 1, self.PIsdiag[newaxis, :])
        _W_nj = exp(logQ_nj - logNorm_n[:, newaxis])
        return _W_nj
Project: pynamd | Author: radakb
def compute_unsampled_weights(self, u_jn):
        """Compute the sample weights for unsampled states. This requires the
        observed reduced potential energies on the complete sample set.

        NB Use of this function can be obviated by simply including the
        unsampled states in the initial calculation. 

        Arguments
        ---------
        u_jn : array-like
            2d array-like of reduced potential energies. The dimensions must
            be L x N, where L is the number of states to compute free energies
            for and N is the total sample size.

        Returns
        -------
        W_nj_k : ndarray
            Sample weights for the N samples in each of L states 
        """
        u_nj_k = asarray(u_jn).T
        # NB ESTIMATING ERRORS HERE WILL CAUSE AN INFINITE LOOP!
        f_k, varf_k = self.compute_unsampled_free_energies(u_jn, False)
        logQ_nj_k = f_k[newaxis, :] - u_nj_k
        m = self.nstates_sampled
        logQ_nj = self._f[newaxis, :] - self._u_nj
        logNorm_n = logsumexp(logQ_nj[:, :m], 1, self.PIsdiag[newaxis, :])
        return exp(logQ_nj_k - logNorm_n[:, newaxis])
Project: PyBGMM | Author: junlulocky
def gibbs_update_mask(self, i_iter):
        mask_old = copy.deepcopy(self.mask)
        assert self.common_component.K==1, "common component can only have one cluster component"

        for i_dim in range(self.D):
            mask_new = copy.deepcopy(self.mask)

            # Compute log probability of `mask[i]`
            log_prob_mask = np.zeros(2, np.float)
            mask_new[i_dim] = 0
            log_prob_mask[0] = self.log_marg_mask(mask_new)
            mask_new[i_dim] = 1
            log_prob_mask[1] = self.log_marg_mask(mask_new)


            prob_mask = np.exp(log_prob_mask - logsumexp(log_prob_mask))

            # Sample the new component assignment for `mask[i]`
            k = utils.draw(prob_mask)
            self.mask[i_dim] = k

        self.p_bern = np.random.beta(self.bern_prior.a + np.sum(self.mask),
                                     self.bern_prior.b + self.D - np.sum(self.mask), 1)[0]
        self.make_robust_p_bern()

        if i_iter % 20 == 0:
            logging.info('p_bern_new: {}'.format(self.p_bern))
            logging.info('mask old: {}'.format(mask_old))
            logging.info('mask new: {}'.format(self.mask))
Project: cgpm | Author: probcomp
def logpdf(self, rowid, targets, constraints=None, inputs=None):
        assert targets.keys() == self.outputs
        assert inputs.keys() == self.inputs
        assert not constraints
        x = inputs[self.inputs[0]]
        y = targets[self.outputs[0]]
        return logsumexp([
            np.log(.5)+self.uniform.logpdf(y-x**2),
            np.log(.5)+self.uniform.logpdf(-y-x**2)
        ])
Project: Bayes | Author: krzjoa
def _geq(self, X):
        numerator = self._log_proba(X)
        denominator = logsumexp(numerator, axis=1)
        denominator = denominator.reshape(len(denominator), 1)
        return numerator - denominator
Project: Bayes | Author: krzjoa
def _less(self, X):
        numerator = self._log_proba(X) + np.log(len(self.classes_) - 1)
        denominator = logsumexp(numerator, axis=1)
        denominator = denominator.reshape(len(denominator), 1)
        return (numerator - denominator) + np.exp(-1) + np.exp(1)
Project: RBM_AE | Author: lingxuez
def outActivate(aValue, logScale=False):
        """
        Output activation (softmax); the result has the same dimensions as aValue.
        """
        if logScale:
            return aValue - misc.logsumexp(aValue)
        else:
            return np.exp(aValue) / np.sum(np.exp(aValue), axis=0)


## node a: pre-activation
Project: plda | Author: RaviSoji
def calc_prob_same_diff(self, data_pairs, return_log=True, norm_probs=True,
                            return_prob='same'):
        assert data_pairs.shape[-2] == 2
        assert isinstance(return_log, bool)
        assert return_prob == 'same' or return_prob == 'diff'

        log_ps_diff = self.calc_probs_diff(data_pairs)
        log_ps_same = self.calc_probs_same(data_pairs)
        assert log_ps_diff.shape[-2] == log_ps_diff.shape[-1]
        assert log_ps_diff.shape[-1] == log_ps_same.shape[-1]

        log_prob_diff = logsumexp(log_ps_diff, axis=(-1, -2))
        log_prob_same = logsumexp(log_ps_same, axis=-1) + np.log(6)
        # There are 42 "different" probabilities and 7 "same", so multiplying
        # the "same" term by 6 makes the prior 50/50 on the same/diff task.

        if return_prob == 'same':
            log_probs = log_prob_same
        else:
            log_probs = log_prob_diff

        if norm_probs is True:
            norms = logsumexp([log_prob_diff, log_prob_same], axis=0)
            log_probs = log_probs - norms

        if return_log is True:
            return log_probs
        else:
            return np.exp(log_probs)
Project: pgsm | Author: aroth85
def held_out_log_predicitive(clustering, dist, partition_prior, test_data, train_data, per_point=False):
    clustering = relabel_clustering(clustering)

    block_params = []

    log_cluster_prior = []

    block_ids = sorted(np.unique(clustering))

    for z in block_ids:
        params = dist.create_params_from_data(train_data[clustering == z])

        block_params.append(params)

        log_cluster_prior.append(partition_prior.log_tau_2_diff(params.N))

    num_blocks = len(block_ids)

    block_params.append(dist.create_params())

    log_cluster_prior.append(partition_prior.log_tau_1_diff(num_blocks))

    log_cluster_prior = np.array(log_cluster_prior)

    log_cluster_prior = log_normalize(log_cluster_prior)

    log_p = np.zeros((test_data.shape[0], len(log_cluster_prior)))

    for z, (w, params) in enumerate(zip(log_cluster_prior, block_params)):
        log_p[:, z] = w + dist.log_predictive_likelihood_bulk(test_data, params)

    if per_point:
        return log_sum_exp(log_p, axis=1)

    else:
        return np.sum(log_sum_exp(log_p, axis=1))
Project: sgnmt | Author: ucam-smt
def _get_eos_prob(self):
        """Get loglikelihood according cur_p, cur_r, and n_consumed """
        eos_point_prob = self._get_eos_point_prob(max(
                                              1, 
                                              self.n_consumed - self.offset))
        if self.use_point_probs:
            return eos_point_prob - self.max_eos_prob
        if not self.prev_eos_probs:
            self.prev_eos_probs.append(eos_point_prob)
            return eos_point_prob
        # bypass utils.log_sum because we always want to use logsumexp here 
        prev_sum = logsumexp(np.asarray([p for p in self.prev_eos_probs])) 
        self.prev_eos_probs.append(eos_point_prob)
        # Desired prob is eos_point_prob / (1-last_eos_probs_sum)
        return eos_point_prob - np.log(1.0-np.exp(prev_sum))
Project: sgnmt | Author: ucam-smt
def predict_next(self):
        """Looks up ngram scores via self.scores. """
        cur_hist_length = len(self.history)
        this_scores = [[] for _ in xrange(cur_hist_length+1)]
        this_unk_scores = [[] for _ in xrange(cur_hist_length+1)]
        for pos in xrange(len(self.scores)):
            this_scores[0].append(self.scores[pos])
            this_unk_scores[0].append(self.unk_scores[pos])
            acc = 0.0
            for order, word in enumerate(self.history):
                if pos + order + 1 >= len(self.scores):
                    break
                acc += utils.common_get(
                    self.scores[pos + order], word, 
                    self.unk_scores[pos + order])
                this_scores[order+1].append(acc + self.scores[pos + order + 1])
                this_unk_scores[order+1].append(
                    acc + self.unk_scores[pos + order + 1])
        combined_scores = []
        combined_unk_scores = []
        for order, (scores, unk_scores) in enumerate(zip(this_scores, 
                                                         this_unk_scores)):
            if scores and order + 1 >= self.min_order:
                score_matrix = np.vstack(scores)
                combined_scores.append(logsumexp(score_matrix, axis=0))
                combined_unk_scores.append(utils.log_sum(unk_scores))
        if not combined_scores:
            self.cur_unk_score = 0.0
            return {}
        self.cur_unk_score = sum(combined_unk_scores)
        return sum(combined_scores)
Project: sgnmt | Author: ucam-smt
def log_sum_log_semiring(vals):
    """Uses the ``logsumexp`` function in scipy to calculate the log of
    the sum of a set of log values.

    Args:
        vals  (set): List or set of numerical values
    """
    return logsumexp(numpy.asarray([val for val in vals]))


#log_sum = log_sum_log_semiring
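A quick usage sketch, summing two path probabilities entirely in the log semiring:

import numpy as np
print(log_sum_log_semiring({np.log(0.5), np.log(0.25)}))  # log(0.75) ~ -0.2877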
Project: CNValloc | Author: m1m0r1
def log_normalize(log_vec, axis=0):
    axes = [slice(None)] * len(log_vec.shape)
    axes[axis] = np.newaxis
    return log_vec - logsumexp(log_vec, axis=axis)[axes]
Project: neural_reaction_fingerprint | Author: jnwei
def cat_error(preds, targets, num_outputs):
    # NOTE: the preds matrix gives the log likelihood of each result, not the
    #       likelihood probability; exponentiate to get the correct value
    # Use Brier score for error estimate

    preds = preds - logsumexp(preds, axis=1, keepdims=True)
    pred_probs = np.exp(preds)

    return np.mean(np.linalg.norm(pred_probs - targets, axis=1))