Python numpy module: logaddexp() example source code

We extracted the following 50 code examples from open-source Python projects to demonstrate how to use numpy.logaddexp().
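
For orientation, np.logaddexp(x, y) computes log(exp(x) + exp(y)) in a way that stays finite where the naive formula overflows. A minimal illustrative sketch (values chosen only to trigger overflow):

import numpy as np

a, b = 1000.0, 1000.0
naive = np.log(np.exp(a) + np.exp(b))  # exp(1000) overflows to inf, so this is inf
stable = np.logaddexp(a, b)            # 1000.6931..., i.e. 1000 + log(2)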

Project: radar | Author: amoose136
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: multiagent-particle-envs | Author: openai
def get_collision_force(self, entity_a, entity_b):
        if (not entity_a.collide) or (not entity_b.collide):
            return [None, None] # not a collider
        if (entity_a is entity_b):
            return [None, None] # don't collide against itself
        # compute actual distance between entities
        delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        # minimum allowable distance
        dist_min = entity_a.size + entity_b.size
        # softmax penetration
        k = self.contact_margin
        penetration = np.logaddexp(0, -(dist - dist_min)/k)*k
        force = self.contact_force * delta_pos / dist * penetration
        force_a = +force if entity_a.movable else None
        force_b = -force if entity_b.movable else None
        return [force_a, force_b]
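
The penetration term above is a scaled softplus, since np.logaddexp(0, x) equals log(1 + exp(x)). A quick check with made-up inputs:

import numpy as np

x = np.array([-50.0, 0.0, 50.0])
softplus = np.logaddexp(0, x)  # ~[0, log(2), 50]: flat for deep negatives, linear for large positives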
Project: krpcScripts | Author: jwvanderbeck
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: aws-lambda-numpy | Author: vitolimandibhrata
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: lambda-numba | Author: rlhotovy
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: cpnest | Author: johnveitch
def increment(self,logL,nlive=None):
    """
    Increment the state of the evidence integrator
    Simply uses rectangle rule for initial estimate
    """
    if(logL<=self.logLs[-1]):
      print('WARNING: NS integrator received non-monotonic logL. {0:.3f} -> {1:.3f}'.format(self.logLs[-1],logL))
    if nlive is None:
      nlive = self.nlive
    oldZ = self.logZ
    logt=-1.0/nlive
    Wt = self.logw + logL + logsubexp(0,logt)
    self.logZ = logaddexp(self.logZ,Wt)
    # Update information estimate
    if np.isfinite(oldZ) and np.isfinite(self.logZ):
      self.info = exp(Wt - self.logZ)*logL + exp(oldZ - self.logZ)*(self.info + oldZ) - self.logZ

    # Update history
    self.logw += logt
    self.iteration += 1
    self.logLs.append(logL)
    self.log_vols.append(self.logw)
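
The key line above is logaddexp(self.logZ, Wt), which accumulates the evidence Z = Σ w_i·L_i without ever leaving log space. A stripped-down sketch of that accumulation, with made-up log-weights and log-likelihoods:

import numpy as np

logZ = -np.inf                              # log(0): nothing accumulated yet
for logw, logL in [(-1.0, -5.0), (-2.0, -4.5), (-3.0, -4.2)]:
    logZ = np.logaddexp(logZ, logw + logL)  # Z += w * L, performed in log space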
Project: deliver | Author: orchestor
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: siHMM | Author: Ardavans
def _expected_durations(self,
            dur_potentials,cumulative_obs_potentials,
            alphastarl,betal,normalizer):
        if self.trunc is not None:
            raise NotImplementedError, "_expected_durations can't handle trunc"
        T = self.T
        logpmfs = -np.inf*np.ones_like(alphastarl)
        errs = np.seterr(invalid='ignore')
        for t in xrange(T):
            cB, offset = cumulative_obs_potentials(t)
            np.logaddexp(dur_potentials(t) + alphastarl[t] + betal[t:] +
                    cB - (normalizer + offset),
                    logpmfs[:T-t], out=logpmfs[:T-t])
        np.seterr(**errs)
        expected_durations = np.exp(logpmfs.T)

        return expected_durations


# TODO call this 'time homog'
Project: siHMM | Author: Ardavans
def messages_backwards(self):
        # NOTE: np.maximum calls are because the C++ code doesn't do
        # np.logaddexp(-inf,-inf) = -inf, it likes nans instead
        from hsmm_messages_interface import messages_backwards_log
        betal, betastarl = messages_backwards_log(
                np.maximum(self.trans_matrix,1e-50),self.aBl,np.maximum(self.aDl,-1000000),
                self.aDsl,np.empty_like(self.aBl),np.empty_like(self.aBl),
                self.right_censoring,self.trunc if self.trunc is not None else self.T)
        assert not np.isnan(betal).any()
        assert not np.isnan(betastarl).any()

        if not self.left_censoring:
            self._normalizer = np.logaddexp.reduce(np.log(self.pi_0) + betastarl[0])
        else:
            raise NotImplementedError

        return betal, betastarl
Project: siHMM | Author: Ardavans
def _expected_durations(self,
            dur_potentials,cumulative_obs_potentials,
            alphastarl,betal,normalizer):
        logpmfs = -np.inf*np.ones((self.Tfull,alphastarl.shape[1]))
        errs = np.seterr(invalid='ignore') # logaddexp(-inf,-inf)
        # TODO censoring not handled correctly here
        for tblock in xrange(self.Tblock):
            possible_durations = self.segmentlens[tblock:].cumsum()[:self.trunc]
            cB, offset = cumulative_obs_potentials(tblock)
            logpmfs[possible_durations -1] = np.logaddexp(
                    dur_potentials(tblock) + alphastarl[tblock]
                    + betal[tblock:tblock+self.trunc if self.trunc is not None else None]
                    + cB - (offset + normalizer),
                    logpmfs[possible_durations -1])
        np.seterr(**errs)
        return np.exp(logpmfs.T)


###################
#  separate trans #
###################
Project: siHMM | Author: Ardavans
def aBl_einsum(self):
        if self._aBBl is None:
            sigmas = np.array([[c.sigmas for c in d.components] for d in self.obs_distns])
            Js = -1./(2*sigmas)
            mus = np.array([[c.mu for c in d.components] for d in self.obs_distns])

            # all_likes is T x Nstates x Ncomponents
            all_likes = \
                    (np.einsum('td,td,nkd->tnk',self.data,self.data,Js)
                        - np.einsum('td,nkd,nkd->tnk',self.data,2*mus,Js))
            all_likes += (mus**2*Js - 1./2*np.log(2*np.pi*sigmas)).sum(2)

            # weights is Nstates x Ncomponents
            weights = np.log(np.array([d.weights.weights for d in self.obs_distns]))
            all_likes += weights[na,...]

            # aBl is T x Nstates
            aBl = self._aBl = np.logaddexp.reduce(all_likes, axis=2)
            aBl[np.isnan(aBl).any(1)] = 0.

            aBBl = self._aBBl = np.empty((self.Tblock,self.num_states))
            for idx, (start,stop) in enumerate(self.changepoints):
                aBBl[idx] = aBl[start:stop].sum(0)

        return self._aBBl
Project: siHMM | Author: Ardavans
def _expected_statistics_from_messages_slow(trans_potential,likelihood_log_potential,alphal,betal):
        expected_states = alphal + betal
        expected_states -= expected_states.max(1)[:,na]
        np.exp(expected_states,out=expected_states)
        expected_states /= expected_states.sum(1)[:,na]

        Al = np.log(trans_potential)
        log_joints = alphal[:-1,:,na] + (betal[1:,na,:] + likelihood_log_potential[1:,na,:]) + Al[na,...]
        log_joints -= log_joints.max((1,2))[:,na,na]
        joints = np.exp(log_joints)
        joints /= joints.sum((1,2))[:,na,na] # NOTE: renormalizing each isnt really necessary
        expected_transcounts = joints.sum(0)

        normalizer = np.logaddexp.reduce(alphal[0] + betal[0])

        return expected_states, expected_transcounts, normalizer

    ### EM
Project: siHMM | Author: Ardavans
def _messages_backwards_log_slow(trans_potential, init_potential, likelihood_log_potential,
                                     feature_weights, window_data):
        errs = np.seterr(over='ignore')
        Al = np.log(trans_potential)
        pil = np.log(init_potential)
        aBl = likelihood_log_potential
        nhs = trans_potential.shape[0]
        sequence_length = aBl.shape[0]
        betal = np.zeros((sequence_length, nhs * 2))
        giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs,1)), Al )), (1,2))
        for t in xrange(betal.shape[0]-2,-1,-1):
            temp_constant = np.sum(feature_weights[:-nhs-1] * window_data[t+1,:]) + feature_weights[-1]
            temp_exp = temp_constant + feature_weights[-nhs-1:-1]
            temp_logaddexp = np.logaddexp(0, temp_exp)
            temp_log_linear = np.tile(temp_exp, 2) * np.repeat([0,1], nhs) - np.tile(temp_logaddexp, 2)

            np.logaddexp.reduce( giant_Al_pil + betal[t+1] +
                                 np.hstack((aBl[t+1], aBl[t+1])) +
                                 temp_log_linear
                                ,axis=1 ,out=(betal[t]))


        np.seterr(**errs)
        return betal
Project: siHMM | Author: Ardavans
def _messages_backwards_log_fast(trans_potential, init_potential, likelihood_log_potential_llt):
        errs = np.seterr(over='ignore')
        Al = np.log(trans_potential)
        pil = np.log(init_potential)
        aBl = likelihood_log_potential_llt
        nhs = trans_potential.shape[0]
        sequence_length = aBl.shape[0]
        betal = np.zeros((sequence_length, nhs * 2))
        giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs,1)), Al )), (1,2))


        for t in xrange(betal.shape[0]-2,-1,-1):
            np.logaddexp.reduce( giant_Al_pil + betal[t+1] + aBl[t+1], axis=1, out=(betal[t]))

        np.seterr(**errs)
        return betal


    ### Gibbs sampling
Project: siHMM | Author: Ardavans
def _expected_segmentation_states(init_potential, expected_states, trans_potential, expected_joints,
                                      feature_weights, window_data):

        #log_q(s_t) for s_t = 1
        data_length = window_data.shape[0]
        mega_mat = np.hstack((window_data[:data_length - 1,:], expected_states[:data_length - 1,:]))
        temp_1 = np.sum(feature_weights * mega_mat, axis=1)
        with np.errstate(invalid='ignore'):
            temp_2 = np.sum(np.sum(expected_joints[:data_length - 1,:] * np.log(trans_potential), axis = 1), axis = 1)
        log_s_t_1 = temp_1 + temp_2
        log_s_t_1 = np.append(log_s_t_1, -float("inf")) #the last state is always zero so the probability of s_t = 1 is zero

        #log q(s_t) for s_t = 0
        log_s_t_0 = np.sum(expected_states[1:, :] * np.log(init_potential), axis = 1)
        log_s_t_0 = np.append(log_s_t_0, 0)

        temp_stack = np.hstack((log_s_t_1[:, na], log_s_t_0[:, na])) #number of rows is the length of the sequence
        expected_states = np.exp(temp_stack - np.logaddexp.reduce(temp_stack[:,:,na], axis = 1))
        return expected_states
Project: Parallel-SGD | Author: angadgill
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
    """Private function used to compute log probabilities within a job."""
    n_samples = X.shape[0]
    log_proba = np.empty((n_samples, n_classes))
    log_proba.fill(-np.inf)
    all_classes = np.arange(n_classes, dtype=np.int)

    for estimator, features in zip(estimators, estimators_features):
        log_proba_estimator = estimator.predict_log_proba(X[:, features])

        if n_classes == len(estimator.classes_):
            log_proba = np.logaddexp(log_proba, log_proba_estimator)

        else:
            log_proba[:, estimator.classes_] = np.logaddexp(
                log_proba[:, estimator.classes_],
                log_proba_estimator[:, range(len(estimator.classes_))])

            missing = np.setdiff1d(all_classes, estimator.classes_)
            log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
                                                 -np.inf)

    return log_proba
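
Starting log_proba at -inf (the log of zero probability) lets each estimator's predictions be folded in with np.logaddexp, which adds probabilities elementwise in the log domain. A toy version with two hypothetical estimators:

import numpy as np

log_p1 = np.log(np.array([0.2, 0.8]))
log_p2 = np.log(np.array([0.5, 0.5]))
log_sum = np.logaddexp(log_p1, log_p2)  # log(p1 + p2), elementwise
avg = np.exp(log_sum) / 2.0             # array([0.35, 0.65])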
Project: Parallel-SGD | Author: angadgill
def _free_energy(self, v):
        """Computes the free energy F(v) = - log sum_h exp(-E(v,h)).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        free_energy : array-like, shape (n_samples,)
            The value of the free energy.
        """
        return (- safe_sparse_dot(v, self.intercept_visible_)
                - np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
                               + self.intercept_hidden_).sum(axis=1))
Project: Alfred | Author: jkachhadia
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: cellranger | Author: 10XGenomics
def logaddexp(arr):
    """Computes log(exp(arr[0]) + exp(arr[1]) + ...). """
    assert(len(arr) >= 2)
    res = np.logaddexp(arr[0], arr[1])
    for i in arr[2:]:
        res = np.logaddexp(res, i)
    return res
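
The same left-to-right fold is available as a ufunc reduction, which several of the Chainer examples below use directly:

import numpy as np

arr = np.log([0.1, 0.2, 0.7])
total = np.logaddexp.reduce(arr)  # log(0.1 + 0.2 + 0.7) == 0.0, up to rounding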
Project: radar | Author: amoose136
def test_logaddexp_values(self):
        x = [1, 2, 3, 4, 5]
        y = [5, 4, 3, 2, 1]
        z = [6, 6, 6, 6, 6]
        for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
            xf = np.log(np.array(x, dtype=dt))
            yf = np.log(np.array(y, dtype=dt))
            zf = np.log(np.array(z, dtype=dt))
            assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
Project: radar | Author: amoose136
def test_logaddexp_range(self):
        x = [1000000, -1000000, 1000200, -1000200]
        y = [1000200, -1000200, 1000000, -1000000]
        z = [1000200, -1000000, 1000200, -1000000]
        for dt in ['f', 'd', 'g']:
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
Project: radar | Author: amoose136
def test_inf(self):
        inf = np.inf
        x = [inf, -inf,  inf, -inf, inf, 1,  -inf,  1]
        y = [inf,  inf, -inf, -inf, 1,   inf, 1,   -inf]
        z = [inf,  inf,  inf, -inf, inf, inf, 1,    1]
        with np.errstate(invalid='raise'):
            for dt in ['f', 'd', 'g']:
                logxf = np.array(x, dtype=dt)
                logyf = np.array(y, dtype=dt)
                logzf = np.array(z, dtype=dt)
                assert_equal(np.logaddexp(logxf, logyf), logzf)
Project: radar | Author: amoose136
def test_nan(self):
        assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
        assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
        assert_(np.isnan(np.logaddexp(np.nan, 0)))
        assert_(np.isnan(np.logaddexp(0, np.nan)))
        assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
Project: char-rbm | Author: colinmorris
def _free_energy(self, v):
        """Computes the free energy F(v) = - log sum_h exp(-E(v,h)).

        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        free_energy : array-like, shape (n_samples,)
            The value of the free energy.
        """
        return (- safe_sparse_dot(v, self.intercept_visible_)
                - np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
                               + self.intercept_hidden_).sum(axis=1))
Project: tbp-next-basket | Author: GiulioRossetti
def sigmoid(x):
    if x >= 0:
        return math.exp(-np.logaddexp(0, -x))
    else:
        return math.exp(x - np.logaddexp(x, 0))
Project: tbp-next-basket | Author: GiulioRossetti
def sigmoid(x):
    return math.exp(-np.logaddexp(0, -x))
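
Both sigmoid variants rely on the identity sigmoid(x) = exp(-log(1 + exp(-x))); the one-liner also vectorises if math.exp is swapped for np.exp. An illustrative check:

import numpy as np

x = np.array([-30.0, 0.0, 30.0])
sig = np.exp(-np.logaddexp(0, -x))  # == 1 / (1 + exp(-x)), overflow-safe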
Project: chainer-segnet | Author: pfnet-research
def check_forward(self, x_data, t_data, class_weight, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = softmax_cross_entropy.softmax_cross_entropy(
            x, t, use_cudnn=use_cudnn, normalize=self.normalize,
            cache_score=self.cache_score, class_weight=class_weight)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, self.dtype)
        self.assertEqual(hasattr(loss.creator, 'y'), self.cache_score)
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value
        loss_expect = 0.0
        count = 0
        x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
            (self.t.size, self.x.shape[1]))
        t = self.t.ravel()
        for xi, ti in six.moves.zip(x, t):
            if ti == -1:
                continue
            log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
            if class_weight is None:
                loss_expect -= (xi - log_z)[ti]
            else:
                loss_expect -= (xi - log_z)[ti] * class_weight[ti]
            count += 1

        if self.normalize:
            if count == 0:
                loss_expect = 0.0
            else:
                loss_expect /= count
        else:
            loss_expect /= len(t_data)

        testing.assert_allclose(
            loss_expect, loss_value, **self.check_forward_options)
Project: massivedatans | Author: JohannesBuchner
def integrate_remainder(sampler, logwidth, logVolremaining, logZ, H, globalLmax):
    # logwidth remains the same now for each sample
    remainder = list(sampler.remainder())
    logV = logwidth
    L0 = remainder[-1][2]
    L0 = globalLmax
    logLs = [Li - L0 for ui, xi, Li in remainder]
    Ls = numpy.exp(logLs)
    LsMax = Ls.copy()
    LsMax[-1] = numpy.exp(globalLmax - L0)
    Lmax = LsMax[1:].sum(axis=0) + LsMax[-1]
    #Lmax = Ls[1:].sum(axis=0) + Ls[-1]
    Lmin = Ls[:-1].sum(axis=0) + Ls[0]
    logLmid = log(Ls.sum(axis=0)) + L0
    logZmid = logaddexp(logZ, logV + logLmid)
    logZup  = logaddexp(logZ, logV + log(Lmax) + L0)
    logZlo  = logaddexp(logZ, logV + log(Lmin) + L0)
    logZerr = logZup - logZlo
    assert numpy.isfinite(H).all()
    assert numpy.isfinite(logZerr).all(), logZerr

    for i in range(len(remainder)):
        ui, xi, Li = remainder[i]
        wi = logwidth + Li
        logZnew = logaddexp(logZ, wi)
        #Hprev = H
        H = exp(wi - logZnew) * Li + exp(logZ - logZnew) * (H + logZ) - logZnew
        H[H < 0] = 0
        #assert (H>0).all(), (H, Hprev, wi, Li, logZ, logZnew)
        logZ = logZnew

    #assert numpy.isfinite(logZerr + (H / sampler.nlive_points)**0.5), (H, sampler.nlive_points, logZerr)

    return logV + logLmid, logZerr, logZmid, logZerr + (H / sampler.nlive_points)**0.5, logZerr + (H / sampler.nlive_points)**0.5
Project: krpcScripts | Author: jwvanderbeck
def test_logaddexp_values(self):
        x = [1, 2, 3, 4, 5]
        y = [5, 4, 3, 2, 1]
        z = [6, 6, 6, 6, 6]
        for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
            xf = np.log(np.array(x, dtype=dt))
            yf = np.log(np.array(y, dtype=dt))
            zf = np.log(np.array(z, dtype=dt))
            assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
Project: krpcScripts | Author: jwvanderbeck
def test_logaddexp_range(self):
        x = [1000000, -1000000, 1000200, -1000200]
        y = [1000200, -1000200, 1000000, -1000000]
        z = [1000200, -1000000, 1000200, -1000000]
        for dt in ['f', 'd', 'g']:
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
Project: krpcScripts | Author: jwvanderbeck
def test_inf(self):
        inf = np.inf
        x = [inf, -inf,  inf, -inf, inf, 1,  -inf,  1]
        y = [inf,  inf, -inf, -inf, 1,   inf, 1,   -inf]
        z = [inf,  inf,  inf, -inf, inf, inf, 1,    1]
        with np.errstate(invalid='raise'):
            for dt in ['f', 'd', 'g']:
                logxf = np.array(x, dtype=dt)
                logyf = np.array(y, dtype=dt)
                logzf = np.array(z, dtype=dt)
                assert_equal(np.logaddexp(logxf, logyf), logzf)
Project: krpcScripts | Author: jwvanderbeck
def test_nan(self):
        assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
        assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
        assert_(np.isnan(np.logaddexp(np.nan, 0)))
        assert_(np.isnan(np.logaddexp(0, np.nan)))
        assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
Project: GPflow | Author: GPflow
def forward(self, x):
        """
        Implementation of softplus. Overflow avoided by use of the logaddexp function.
        self._lower is added before returning.
        """
        return np.logaddexp(0, x) + self._lower
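
The overflow claim in the docstring is easy to confirm against the naive log1p(exp(x)) form (an illustrative check, not GPflow code):

import numpy as np

x = np.float64(800.0)
np.log1p(np.exp(x))   # exp(800) overflows, giving inf
np.logaddexp(0.0, x)  # 800.0, as softplus should behave for large x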
Project: deepspeech.mxnet | Author: samsungsds-rnd
def ctc_loss(label, prob, remainder, seq_length, batch_size, num_gpu=1, big_num=1e10):
    label_ = [0, 0]
    prob[prob < 1 / big_num] = 1 / big_num
    log_prob = np.log(prob)

    l = len(label)
    for i in range(l):
        label_.append(int(label[i]))
        label_.append(0)

    l_ = 2 * l + 1
    a = np.full((seq_length, l_ + 1), -big_num)
    a[0][1] = log_prob[remainder][0]
    a[0][2] = log_prob[remainder][label_[2]]
    for i in range(1, seq_length):
        row = i * int(batch_size / num_gpu) + remainder
        a[i][1] = a[i - 1][1] + log_prob[row][0]
        a[i][2] = np.logaddexp(a[i - 1][2], a[i - 1][1]) + log_prob[row][label_[2]]
        for j in range(3, l_ + 1):
            a[i][j] = np.logaddexp(a[i - 1][j], a[i - 1][j - 1])
            if label_[j] != 0 and label_[j] != label_[j - 2]:
                a[i][j] = np.logaddexp(a[i][j], a[i - 1][j - 2])
            a[i][j] += log_prob[row][label_[j]]

    return -np.logaddexp(a[seq_length - 1][l_], a[seq_length - 1][l_ - 1])


# label is done with remove_blank
# pred is got from pred_best
Project: chainer-deconv | Author: germanRos
def _forward_cpu_one(self, x, t, W):
        begin = self.begins[t]
        end = self.begins[t + 1]

        w = W[self.paths[begin:end]]
        wxy = w.dot(x) * self.codes[begin:end]
        loss = numpy.logaddexp(0.0, -wxy)  # == log(1 + exp(-wxy))
        return numpy.sum(loss)
Project: chainer-deconv | Author: germanRos
def check_forward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(
            x, t, use_cudnn=use_cudnn, normalize=self.normalize,
            cache_score=self.cache_score)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, self.dtype)
        self.assertEqual(hasattr(loss.creator, 'y'), self.cache_score)
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value
        loss_expect = 0.0
        count = 0
        x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
            (self.t.size, self.x.shape[1]))
        t = self.t.ravel()
        for xi, ti in six.moves.zip(x, t):
            if ti == -1:
                continue
            log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
            loss_expect -= (xi - log_z)[ti]
            count += 1

        if self.normalize:
            if count == 0:
                loss_expect = 0.0
            else:
                loss_expect /= count
        else:
            loss_expect /= len(t_data)

        gradient_check.assert_allclose(
            loss_expect, loss_value, **self.check_forward_options)
Project: chainer-deconv | Author: germanRos
def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.log_softmax(x, use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)

        log_z = numpy.ufunc.reduce(
            numpy.logaddexp, self.x, axis=1, keepdims=True)
        y_expect = self.x - log_z

        gradient_check.assert_allclose(
            y_expect, y.data, **self.check_forward_options)
Project: improved_wgan_training | Author: YuguangTong
def js_with(self, p):
        log_p = np.array([p.log_likelihood(ngram) for ngram in p.unique_ngrams()])
        log_q = np.array([self.log_likelihood(ngram) for ngram in p.unique_ngrams()])
        log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
        kl_p_m = np.sum(np.exp(log_p) * (log_p - log_m))

        log_p = np.array([p.log_likelihood(ngram) for ngram in self.unique_ngrams()])
        log_q = np.array([self.log_likelihood(ngram) for ngram in self.unique_ngrams()])
        log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
        kl_q_m = np.sum(np.exp(log_q) * (log_q - log_m))

        return 0.5*(kl_p_m + kl_q_m) / np.log(2)
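
Here np.logaddexp assembles the mixture term log m = log((p + q) / 2) directly from log p and log q, since log((p + q)/2) = logaddexp(log p - log 2, log q - log 2). A toy check with two known distributions:

import numpy as np

log_p = np.log(np.array([0.5, 0.5]))
log_q = np.log(np.array([0.9, 0.1]))
log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
np.exp(log_m)  # array([0.7, 0.3]) == (p + q) / 2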
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_logaddexp_values(self):
        x = [1, 2, 3, 4, 5]
        y = [5, 4, 3, 2, 1]
        z = [6, 6, 6, 6, 6]
        for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
            xf = np.log(np.array(x, dtype=dt))
            yf = np.log(np.array(y, dtype=dt))
            zf = np.log(np.array(z, dtype=dt))
            assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_logaddexp_range(self):
        x = [1000000, -1000000, 1000200, -1000200]
        y = [1000200, -1000200, 1000000, -1000000]
        z = [1000200, -1000000, 1000200, -1000000]
        for dt in ['f', 'd', 'g']:
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_inf(self):
        inf = np.inf
        x = [inf, -inf,  inf, -inf, inf, 1,  -inf,  1]
        y = [inf,  inf, -inf, -inf, 1,   inf, 1,   -inf]
        z = [inf,  inf,  inf, -inf, inf, inf, 1,    1]
        with np.errstate(invalid='raise'):
            for dt in ['f', 'd', 'g']:
                logxf = np.array(x, dtype=dt)
                logyf = np.array(y, dtype=dt)
                logzf = np.array(z, dtype=dt)
                assert_equal(np.logaddexp(logxf, logyf), logzf)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_nan(self):
        assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
        assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
        assert_(np.isnan(np.logaddexp(np.nan, 0)))
        assert_(np.isnan(np.logaddexp(0, np.nan)))
        assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
Project: rnn.wgan | Author: amirbar
def js_with(self, p):
        log_p = np.array([p.log_likelihood(ngram) for ngram in p.unique_ngrams()])
        log_q = np.array([self.log_likelihood(ngram) for ngram in p.unique_ngrams()])
        log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
        kl_p_m = np.sum(np.exp(log_p) * (log_p - log_m))

        log_p = np.array([p.log_likelihood(ngram) for ngram in self.unique_ngrams()])
        log_q = np.array([self.log_likelihood(ngram) for ngram in self.unique_ngrams()])
        log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
        kl_q_m = np.sum(np.exp(log_q) * (log_q - log_m))

        return 0.5 * (kl_p_m + kl_q_m) / np.log(2)
Project: improved_wgan_training | Author: igul222
def js_with(self, p):
        log_p = np.array([p.log_likelihood(ngram) for ngram in p.unique_ngrams()])
        log_q = np.array([self.log_likelihood(ngram) for ngram in p.unique_ngrams()])
        log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
        kl_p_m = np.sum(np.exp(log_p) * (log_p - log_m))

        log_p = np.array([p.log_likelihood(ngram) for ngram in self.unique_ngrams()])
        log_q = np.array([self.log_likelihood(ngram) for ngram in self.unique_ngrams()])
        log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
        kl_q_m = np.sum(np.exp(log_q) * (log_q - log_m))

        return 0.5*(kl_p_m + kl_q_m) / np.log(2)
Project: pyurgent | Author: tiendung
def evalObjectiveFunction(clean_biascount,Pword_plus,Pword_minus,ratio,removing_words):
    mlog(evalObjectiveFunction.__name__,"call")
    import numpy as np
    import math
    #Obj = np.log(1)
    Obj = 0
    for doc in clean_biascount:
        Pdoc = calcProbabilityDocument(Pword_plus,Pword_minus,doc[1],ratio,removing_words)
        i = clean_biascount.index(doc) 
        if i == 0:
            mlog(evalObjectiveFunction.__name__,'Pdoc 0 ' + str(Pdoc) + ' type ' + str(type(Pdoc)))
            t00 = np.exp(np.float64(Pdoc[0]))
            t01 = np.exp(np.float64(Pdoc[1]))
            t1 = t00 - t01
            #print str(t1)
            t2 = abs(t1)
            Obj = np.log(t2)
            #print str(Obj)
        if i % 100 == 0:
            mlog(evalObjectiveFunction.__name__,"Pdoc + " + str(i) + "= " + str(Pdoc))
        if doc[0] == True:
            Obj = np.logaddexp(Obj,Pdoc[0])
            if i % 100 == 1:
                mlog(evalObjectiveFunction.__name__,"Obj+ " + str(i) + " after += " + str(Obj))
            Obj = np.log(np.exp(Obj) - np.exp(Pdoc[1]))
        elif doc[0] == False:
            Obj = np.log(np.exp(np.float64(Obj)) + np.exp(np.float64(Pdoc[1])))
            if i % 100 == 2:
                mlog(evalObjectiveFunction.__name__,"Obj- " + str(i) + " after += " + str(Obj))
            Obj = np.log(np.exp(Obj) - np.exp(Pdoc[0]))
        if Obj == 0.0:
            mlog(evalObjectiveFunction.__name__, "Obj=0 fuck " + str(i) + "")
    mlog(evalObjectiveFunction.__name__,"Obj = " + str(np.exp(Obj)))
    if math.isnan(np.exp(Obj))==False:
        print 'J = ' + str(np.exp(Obj))
    return Obj #type np.log
Project: e2end | Author: oplatek
def sigmoid(x):
    x = np.array(x)
    return np.exp(-np.logaddexp(0, -x))
Project: nonce2vec | Author: minimalparts
def update_phi(self, doc_number, time):
        """
        Update variational multinomial parameters, based on a document and a time-slice.
        This is done based on the original Blei-LDA paper, where:
        log_phi := beta * exp(Ψ(gamma)), over every topic for every word.

        TODO: incorporate lee-sueng trick used in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
        """
        num_topics = self.lda.num_topics
        # digamma values
        dig = np.zeros(num_topics)

        for k in range(0, num_topics):
            dig[k] = digamma(self.gamma[k])

        n = 0   # keep track of iterations for phi, log_phi
        for word_id, count in self.doc:
            for k in range(0, num_topics):
                self.log_phi[n][k] = dig[k] + self.lda.topics[word_id][k]

            log_phi_row = self.log_phi[n]
            phi_row = self.phi[n]

            # log normalize
            v = log_phi_row[0]
            for i in range(1, len(log_phi_row)):
                v = np.logaddexp(v, log_phi_row[i])

            # subtract every element by v
            log_phi_row = log_phi_row - v 
            phi_row = np.exp(log_phi_row)
            self.log_phi[n] = log_phi_row
            self.phi[n] = phi_row
            n +=1 # increase iteration

        return self.phi, self.log_phi
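
The inner loop that folds np.logaddexp over log_phi_row computes a log-sum-exp normaliser; the same normalisation can be vectorised (a sketch, not the project's code):

import numpy as np

log_phi_row = np.array([-1.0, -2.0, -0.5])
v = np.logaddexp.reduce(log_phi_row)  # log(sum(exp(log_phi_row)))
phi_row = np.exp(log_phi_row - v)     # normalised, sums to 1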
Project: aws-lambda-numpy | Author: vitolimandibhrata
def test_logaddexp_values(self):
        x = [1, 2, 3, 4, 5]
        y = [5, 4, 3, 2, 1]
        z = [6, 6, 6, 6, 6]
        for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
            xf = np.log(np.array(x, dtype=dt))
            yf = np.log(np.array(y, dtype=dt))
            zf = np.log(np.array(z, dtype=dt))
            assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
Project: aws-lambda-numpy | Author: vitolimandibhrata
def test_logaddexp_range(self):
        x = [1000000, -1000000, 1000200, -1000200]
        y = [1000200, -1000200, 1000000, -1000000]
        z = [1000200, -1000000, 1000200, -1000000]
        for dt in ['f', 'd', 'g']:
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_almost_equal(np.logaddexp(logxf, logyf), logzf)