Python numpy module: log() code examples

The following are 50 code examples, extracted from open-source Python projects, illustrating how to use numpy.log().

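Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below): numpy.log computes the natural logarithm element-wise, and dividing by np.log(base) gives a logarithm in any other base.

import numpy as np

x = np.array([1.0, np.e, 10.0, 100.0])
print(np.log(x))               # natural log, element-wise -> [0., 1., 2.3026, 4.6052]
print(np.log(x) / np.log(10))  # change of base: log base 10 via np.log
print(np.log1p(1e-12))         # log(1 + x), numerically safer for tiny x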
Project: Stein-Variational-Gradient-Descent    Author: DartML    | Project source | File source
def svgd_kernel(self, h = -1):
        sq_dist = pdist(self.theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, using median trick
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(self.theta.shape[0]+1))

        # compute the rbf kernel

        Kxy = np.exp( -pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, self.theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(self.theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy)
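The h < 0 branch above uses the so-called median trick to pick the RBF bandwidth. As a standalone illustration (the toy particle matrix is my own choice, not from the project), the bandwidth and kernel can be computed like this:

import numpy as np
from scipy.spatial.distance import pdist, squareform

theta = np.random.randn(5, 2)                       # 5 toy particles in 2-D
pairwise_dists = squareform(pdist(theta)) ** 2      # squared pairwise distances
h = np.median(pairwise_dists)
h = np.sqrt(0.5 * h / np.log(theta.shape[0] + 1))   # median trick: h^2 = median / (2*log(n+1))
Kxy = np.exp(-pairwise_dists / h**2 / 2)            # RBF kernel matrix
print(Kxy.shape)                                    # (5, 5)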
Project: PersonalizedMultitaskLearning    Author: mitmedialab    | Project source | File source
def saveHintonPlot(self, matrix, num_tests, max_weight=None, ax=None):
        """Draw Hinton diagram for visualizing a weight matrix."""
        fig,ax = plt.subplots(1,1)

        if not max_weight:
            max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2))

        ax.patch.set_facecolor('gray')
        ax.set_aspect('equal', 'box')
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())

        for (x, y), w in np.ndenumerate(matrix):
            color = 'white' if w > 0 else 'black'
            size = np.sqrt(np.abs(0.5*w/num_tests)) # Need to scale so that it is between 0 and 0.5
            rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
                                 facecolor=color, edgecolor=color)
            ax.add_patch(rect)

        ax.autoscale_view()
        ax.invert_yaxis()
        plt.savefig(self.figures_path + self.save_prefix + '-Hinton.eps')
        plt.close()
Project: onto-lstm    Author: pdasigi    | Project source | File source
def _factor_target_indices(self, Y_inds, vocab_size=None, base=2):
    if vocab_size is None:
      vocab_size = len(self.dp.word_index)
    print >>sys.stderr, "Factoring targets of vocabulary size: %d"%(vocab_size)
    num_vecs = int(math.ceil(math.log(vocab_size)/math.log(base))) + 1
    base_inds = []
    div_Y_inds = Y_inds
    print >>sys.stderr, "Number of factors: %d"%num_vecs
    for i in range(num_vecs):
      new_inds = div_Y_inds % base
      if i == num_vecs - 1:
        if new_inds.sum() == 0:
          # Most significant "digit" is a zero. Omit it.
          break
      base_inds.append(new_inds)
      div_Y_inds = numpy.copy(div_Y_inds/base)
    base_vecs = [self._make_one_hot(base_inds_i, base) for base_inds_i in base_inds]
    return base_vecs
Project: pylspm    Author: lseman    | Project source | File source
def sampleSize(self):
        r = 0.3
        alpha = 0.05
#        power=0.9

        C = 0.5 * np.log((1 + r) / (1 - r))

        Za = scipy.stats.norm.ppf(1 - (alpha / 2))

        sizeArray = []
        powerArray = []

        power = 0.5
        for i in range(50, 100, 1):
            power = i / 100
            powerArray.append(power)

            Zb = scipy.stats.norm.ppf(1 - power)
            N = abs((Za - Zb) / C)**2 + 3

            sizeArray.append(N)

        return [powerArray, sizeArray]
Project: variational-text-tensorflow    Author: carpedm20    | Project source | File source
def sample(self, sample_size=20, text=None):
    """Sample the documents."""
    p = 1

    if text is not None:
      try:
        x, word_idxs = self.reader.get(text)
      except Exception as e:
        print(e)
        return
    else:
      x, word_idxs = self.reader.random()
    print(" [*] Text: %s" % " ".join([self.reader.idx2word[word_idx] for word_idx in word_idxs]))

    cur_ps = self.sess.run(self.p_x_i, feed_dict={self.x: x})
    word_idxs = np.array(cur_ps).argsort()[-sample_size:][::-1]
    ps = cur_ps[word_idxs]

    for idx, (cur_p, word_idx) in enumerate(zip(ps, word_idxs)):
      print("  [%d] %-20s: %.8f" % (idx+1, self.reader.idx2word[word_idx], cur_p))
      p *= cur_p

      print(" [*] perp : %8.f" % -np.log(p))
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project source | File source
def lr_grad_norm_avg(self):
    # this is for enforcing that lr * grad_norm does not
    # increase dramatically in case of instability.
    # Not necessary for basic use.
    global_state = self._global_state
    beta = self._beta
    if "lr_grad_norm_avg" not in global_state:
      global_state['grad_norm_squared_avg_log'] = 0.0
    global_state['grad_norm_squared_avg_log'] = \
      global_state['grad_norm_squared_avg_log'] * beta \
      + (1 - beta) * np.log(global_state['grad_norm_squared'] + eps)
    if "lr_grad_norm_avg" not in global_state:
      global_state["lr_grad_norm_avg"] = \
        0.0 * beta + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
      # we monitor the minimal smoothed ||lr * grad||
      global_state["lr_grad_norm_avg_min"] = \
        np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() )
    else:
      global_state["lr_grad_norm_avg"] = global_state["lr_grad_norm_avg"] * beta \
        + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
      global_state["lr_grad_norm_avg_min"] = \
        min(global_state["lr_grad_norm_avg_min"], 
            np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() ) )
Project: distributional_perspective_on_RL    Author: Kiwoo    | Project source | File source
def sample_dtype(self):
        return tf.int32

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#                 - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=1)
Project: Homology_BG    Author: jyotikab    | Project source | File source
def spec_entropy(Rates, time_range=[], bin_w=5., freq_range=[]):
    '''Function to calculate the spectral entropy'''

    power, freq, dfreq, dummy, dummy = mypsd(Rates, time_range, bin_w=bin_w)
    if freq_range != []:
        power = power[(freq >= freq_range[0]) & (freq <= freq_range[1])]
        freq = freq[(freq >= freq_range[0]) & (freq <= freq_range[1])]
    maxFreq = freq[np.where(power == np.max(power))] * 1000 * 100
    perMax = (np.max(power) / np.sum(power)) * 100
    k = len(freq)
    power = power / sum(power)
    sum_power = 0
    for ii in range(k):
        sum_power += (power[ii] * np.log(power[ii]))
    spec_ent = -(sum_power / np.log(k))
    return spec_ent, dfreq, maxFreq, perMax
Project: wmd-relax    Author: src-d    | Project source | File source
def __init__(self, nlp, **kwargs):
            """
            Initializes a new instance of SpacySimilarityHook class.

            :param nlp: `spaCy language object <https://spacy.io/docs/api/language>`_.
            :param ignore_stops: Indicates whether to ignore the stop words.
            :param only_alpha: Indicates whether only alpha tokens must be used.
            :param frequency_processor: The function which is applied to raw \
                                        token frequencies.
            :type ignore_stops: bool
            :type only_alpha: bool
            :type frequency_processor: callable
            """
            self.nlp = nlp
            self.ignore_stops = kwargs.get("ignore_stops", True)
            self.only_alpha = kwargs.get("only_alpha", True)
            self.frequency_processor = kwargs.get(
                "frequency_processor", lambda t, f: numpy.log(1 + f))
Project: sampleRNN_ICLR2017    Author: soroushmehr    | Project source | File source
def gaussian_nll(x, mus, sigmas):
    """
    NLL for Multivariate Normal with diagonal covariance matrix
    See:
        wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function
    where \Sigma = diag(s_1^2,..., s_n^2).

    x, mus, sigmas all should have the same shape.
    sigmas (s_1,..., s_n) should be strictly positive.
    The output has the same shape, but without the last dimension.
    """
    nll = lib.floatX(numpy.log(2. * numpy.pi))
    nll += 2. * T.log(sigmas)
    nll += ((x - mus) / sigmas) ** 2.
    nll = nll.sum(axis=-1)
    nll *= lib.floatX(0.5)
    return nll
Project: pycma    Author: CMA-ES    | Project source | File source
def monotoneTFosc(f):
    """Maps [-inf,inf] to [-inf,inf] with different constants
    for positive and negative part.

    """
    if np.isscalar(f):
        if f > 0.:
            f = np.log(f) / 0.1
            f = np.exp(f + 0.49 * (np.sin(f) + np.sin(0.79 * f))) ** 0.1
        elif f < 0.:
            f = np.log(-f) / 0.1
            f = -np.exp(f + 0.49 * (np.sin(0.55 * f) + np.sin(0.31 * f))) ** 0.1
        return f
    else:
        f = np.asarray(f)
        g = f.copy()
        idx = (f > 0)
        g[idx] = np.log(f[idx]) / 0.1
        g[idx] = np.exp(g[idx] + 0.49 * (np.sin(g[idx]) + np.sin(0.79 * g[idx])))**0.1
        idx = (f < 0)
        g[idx] = np.log(-f[idx]) / 0.1
        g[idx] = -np.exp(g[idx] + 0.49 * (np.sin(0.55 * g[idx]) + np.sin(0.31 * g[idx])))**0.1
        return g
Project: astrobase    Author: waqasbhatti    | Project source | File source
def logscale_img(img_array,
                 cap=255.0,
                 coeff=1000.0):
    '''
    This scales the image according to the relation:

    logscale_img = np.log(coeff*(img/max(img))+1)/np.log(coeff)

    Taken from the DS9 scaling algorithms page at:

    http://hea-www.harvard.edu/RD/ds9/ref/how.html

    According to that page:

    coeff = 1000.0 works well for optical images
    coeff = 100.0 works well for IR images

    '''

    logscaled_img = np.log(coeff*img_array/np.nanmax(img_array)+1)/np.log(coeff)
    return cap*logscaled_img
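A hedged usage sketch of the scaling above (the image array is synthetic, just for illustration; it assumes logscale_img is defined as shown):

import numpy as np

img = np.random.rand(64, 64) * 5000.0                # synthetic "optical" image
scaled = logscale_img(img, cap=255.0, coeff=1000.0)
print(scaled.min(), scaled.max())                    # values fall roughly within [0, cap]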
Project: NumpyDL    Author: oujago    | Project source | File source
def forward(self, outputs, targets):
        """SoftmaxCategoricalCrossEntropy forward propagation.

        .. math:: L_i = - \\sum_j{t_{i,j} \\log(p_{i,j})}

        Parameters
        ----------
        outputs : numpy.array
            Predictions in (0, 1), such as softmax output of a neural network,
            with data points in rows and class probabilities in columns.
        targets : numpy.array
            Either targets in [0, 1] matching the layout of `outputs`, or
            a vector of int giving the correct class index per data point.

        Returns
        -------
        numpy 1D array
            An expression for the item-wise categorical cross-entropy.
        """
        outputs = np.clip(outputs, self.epsilon, 1 - self.epsilon)
        return np.mean(-np.sum(targets * np.log(outputs), axis=1))
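As a quick sanity check of the formula in the docstring (toy numbers, not from the project), the item-wise cross-entropy reduces to -log of the probability assigned to the true class:

import numpy as np

outputs = np.array([[0.7, 0.2, 0.1],
                    [0.1, 0.8, 0.1]])               # toy softmax outputs
targets = np.array([[1, 0, 0],
                    [0, 1, 0]])                     # one-hot targets
loss = np.mean(-np.sum(targets * np.log(outputs), axis=1))
print(loss)                                         # mean of -log(0.7) and -log(0.8), ~0.29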
Project: segmentation_DLMI    Author: imatge-upc    | Project source | File source
def test_GWD(self):
        # Compute categorical crossentropy
        indices = self.mock_y > 0
        selected_log = -np.log(self.mock_x_softmax[indices])
        self.loss = 0  # np.sum(selected_log) / np.sum(self.mock_y)
        # Create keras model with this activation and compile it
        model = Sequential()
        activation_layer = Lambda(lambda x: x,
                                  input_shape=self.data_shape[1:],
                                  output_shape=self.data_shape[1:]
                                  )
        model.add(activation_layer)
        model.compile('sgd', loss=gwd)

        # Predict data from the model
        loss = model.evaluate(self.mock_y, self.mock_y, batch_size=1, verbose=0)
        # Assertions
        print('Expected loss: {}'.format(self.loss))
        print('Actual loss: {}'.format(loss))
        self.assertTrue(np.allclose(loss, self.loss),
                        msg='Categorical cross-entropy loss 3D does not produce the expected results')
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def logTickValues(self, minVal, maxVal, size, stdTicks):

        ## start with the tick spacing given by tickValues().
        ## Any level whose spacing is < 1 needs to be converted to log scale

        ticks = []
        for (spacing, t) in stdTicks:
            if spacing >= 1.0:
                ticks.append((spacing, t))

        if len(ticks) < 3:
            v1 = int(np.floor(minVal))
            v2 = int(np.ceil(maxVal))
            #major = list(range(v1+1, v2))

            minor = []
            for v in range(v1, v2):
                minor.extend(v + np.log10(np.arange(1, 10)))
            minor = [x for x in minor if x>minVal and x<maxVal]
            ticks.append((None, minor))
        return ticks
Project: bayestsa    Author: thalesians    | Project source | File source
def summary(self):
        s = OrderedDict()
        s['source kind'] = self.sourcekind
        s['source'] = self.source
        if self.params is not None:
            for param, value in self.params._asdict().items():
                s['parameter: %s' % param] = value
            s['log-variance theoretical half-life'] = self.params.logvarhalflife()
            s['log-variance theoretical unconditional s.d.'] = np.sqrt(self.params.logvaruncondvar())
        s['log-return sample mean'] = np.mean(self.svdf['logreturn'])
        s['log-return sample s.d.'] = np.sqrt(np.var(self.svdf['logreturn']))
        if 'logvar' in self.svdf.columns:
            s['log-variance sample mean'] = np.mean(self.svdf['logvar'])
            s['log-variance sample s.d.'] = np.sqrt(np.var(self.svdf['logvar']))
        s['correlation timing'] = self.cortiming        
        s['log-return forward?'] = self.logreturnforward
        s['log-return scale'] = self.logreturnscale
        return s
Project: kaggle-review    Author: daxiongshu    | Project source | File source
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    https://www.kaggle.com/wiki/MultiClassLogLoss
    Parameters
    ----------
    y_true : array, shape = [n_samples]
            true class, integers in [0, n_classes - 1]
    y_pred : array, shape = [n_samples, n_classes]
    Returns
    -------
    loss : float
    """
    predictions = np.clip(y_pred, eps, 1 - eps)

    # normalize row sums to 1
    predictions /= predictions.sum(axis=1)[:, np.newaxis]

    actual = np.zeros(y_pred.shape)
    n_samples = actual.shape[0]
    actual[np.arange(n_samples), y_true.astype(int)] = 1
    vectsum = np.sum(actual * np.log(predictions))
    loss = -1.0 / n_samples * vectsum
    return loss
Project: mimic3-benchmarks    Author: YerevaNN    | Project source | File source
def step(self, mode):
        if mode == "train" and self.mode == "test":
            raise Exception("Cannot train during test mode")

        if mode == "train":
            theano_fn = self.train_fn
            batch_gen = self.train_batch_gen
        elif mode == "test":
            theano_fn = self.test_fn
            batch_gen = self.test_batch_gen
        else:
            raise Exception("Invalid mode")

        data = next(batch_gen)
        ys = data[-1]
        data = data[:-1]
        ret = theano_fn(*data)

        return {"prediction": np.exp(ret[0]) - 1,
                "answers": ys,
                "current_loss": ret[1],
                "loss_reg": ret[2],
                "loss_mse": ret[1] - ret[2],
                "log": ""}
Project: Stein-Variational-Gradient-Descent    Author: DartML    | Project source | File source
def evaluation(self, X_test, y_test):
        # normalization
        X_test = self.normalization(X_test)

        # average over the output
        pred_y_test = np.zeros([self.M, len(y_test)])
        prob = np.zeros([self.M, len(y_test)])

        '''
            Since we have M particles, we use a Bayesian view to calculate rmse and log-likelihood
        '''
        for i in range(self.M):
            w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
            pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train
            prob[i, :] = np.sqrt(np.exp(loggamma)) /np.sqrt(2*np.pi) * np.exp( -1 * (np.power(pred_y_test[i, :] - y_test, 2) / 2) * np.exp(loggamma) )
        pred = np.mean(pred_y_test, axis=0)

        # evaluation
        svgd_rmse = np.sqrt(np.mean((pred - y_test)**2))
        svgd_ll = np.mean(np.log(np.mean(prob, axis = 0)))

        return (svgd_rmse, svgd_ll)
Project: lstm-poetry    Author: dvictor    | Project source | File source
def sample(self, sess, chars, vocab, num, prime, temperature):
        state = self.cell.zero_state(1, tf.float32).eval()
        for char in prime[:-1]:
            x = np.zeros((1, 1))
            x[0, 0] = vocab[char]
            feed = {self.input_data: x, self.initial_state: state}
            [state] = sess.run([self.final_state], feed)

        def weighted_pick(a):
            a = a.astype(np.float64)
            a = a.clip(min=1e-20)
            a = np.log(a) / temperature
            a = np.exp(a) / (np.sum(np.exp(a)))
            return np.argmax(np.random.multinomial(1, a, 1))

        char = prime[-1]
        for n in range(num):
            x = np.zeros((1, 1))
            x[0, 0] = vocab[char]
            feed = {self.input_data: x, self.initial_state: state}
            [probs, state] = sess.run([self.probs, self.final_state], feed)
            p = probs[0]
            sample = weighted_pick(p)
            char = chars[sample]
            yield char
Project: code-uai16    Author: thanhan    | Project source | File source
def update_qz(self, left = None, right = None):

        if left is None:
            left = 0
            right = self.lc.n

        for index in range(left, right):
                #if index % 100 == 0:
                #    print index
                #    sys.stdout.flush()

                qz_1 = np.log(self.theta)
                qz_0 = np.log(1 - self.theta)
                for (label, worker) in zip(*self.lc.crowd_labels[index]):
                    if label >=0 and worker in self.quv:
                      qz_1 += expectation_z(self.quv[worker][0], self.quv[worker][1], label)
                      qz_0 += expectation_z(self.quv[worker][2], self.quv[worker][3], label)

                qz_1 = np.exp(qz_1)
                qz_0 = np.exp(qz_0)

                temp = qz_1 * 1.0 / (qz_0 + qz_1)
                if not math.isnan(temp):
                    self.total_changes += np.abs(self.qz[index] - temp)
                    self.qz[index] = temp
Project: code-uai16    Author: thanhan    | Project source | File source
def fit_normal(self, a):
        """
        fit a normal distribution to [(x, p)]
        where p is in log scale
        """
        # normalize p:
        #print a
        list_p = []
        for (x, p) in a:  list_p.append(p)
        ps = scipy.misc.logsumexp(list_p)

        s  = 0
        ss = 0
        for (x, p) in a:
            s  += x * np.exp(p - ps)
            ss += x*x * np.exp(p - ps)

        var = ss - s*s
        ep = 1E-300
        if var < ep: var = ep
        return (s, var)
Project: code-uai16    Author: thanhan    | Project source | File source
def eval_pdf(self, worker, var, x, list_il):
        """
        Eval *LOG* of pdf of new qu or qv
        """
        if var == 'sen':
            res = expectation_binorm('v', self.quv[worker][2], self.quv[worker][3], \
                x, self.get_mu(worker), self.get_c(worker))
            #print res
            for (item, label) in list_il:
                item_w = self.get_item_w(item)
                res += item_w * self.qz[item] * np.log( self.Ber(S(x), label))
        else:
            res = expectation_binorm('u', self.quv[worker][0], self.quv[worker][1], \
                x, self.get_mu(worker), self.get_c(worker))
            for (item, label) in list_il:
                item_w = self.get_item_w(item)
                res += item_w * (1 - self.qz[item]) * np.log( self.Ber(S(x), label))

        return res
Project: code-uai16    Author: thanhan    | Project source | File source
def expectation_z(mu, var, L, w = 3):
    """
    USE LAPLACE approx
    Evaluate the expectation of log[ S(u)^L (1-S(u))^(1-L) ]
    when u ~ Normal(mu, var)
    """
    if L == 1:
        f = lambda u: np.log(S(u))
        fpp = lambda x: -np.exp(x) / pow(1 + np.exp(x), 2)
        return f(mu) + 0.5* fpp(mu)*var
    else:
        f = lambda u: np.log(1 - S(u))
        fpp = lambda x: -np.exp(x) / pow(1 + np.exp(x), 2)
        return f(mu) + 0.5* fpp(mu)*var

    # need: E[u~N](f)
    return scipy.integrate.quad(f, mu - w*std, mu + w*std)[0]
Project: code-uai16    Author: thanhan    | Project source | File source
def expectation_z_quad(mu, var, L, w = 3):
    """
    USE QUAD (NUMERICAL INTEGRATION)
    Evaluate the expectation of log[ S(u)^L (1-S(u))^(1-L) ]
    when u ~ Normal(mu, var)
    """
    #U = np.random.normal(mu, np.sqrt(var), num)
    if L == 1:
        f = lambda u: scipy.stats.norm.pdf(u, loc = mu, scale = np.sqrt(var) ) * np.log(S(u))
    else:
        f = lambda u: scipy.stats.norm.pdf(u, loc = mu, scale = np.sqrt(var) ) * (np.log(1 - S(u)))

    #return f
    std = np.sqrt(var)
    #return scipy.integrate.quad(f, mu - w*std, mu + w*std, epsabs = 1.0e-4, epsrel = 1.0e-4, limit = 25)[0]
    return scipy.integrate.quad(f, mu - w*std, mu + w*std)[0]
Project: code-uai16    Author: thanhan    | Project source | File source
def expectation_binorm(rv, mu, var, x, M, C, w = 3):
    """
    Evaluate the expectation of log Norm(uv| M, C)
         x = u, v ~ Norm(mu, var) rv == 'v'
         x = v, u ~ Norm(mu, var) rv == 'u'
    """
    #print rv, mu, var, x, M, C
    if rv == 'v':
      f = lambda v: scipy.stats.norm.pdf(v, loc = mu, scale = np.sqrt(var) ) * \
        np.log(scipy.stats.multivariate_normal.pdf([x, v], mean = M, cov = C, allow_singular = True))
    else:
      f = lambda u: scipy.stats.norm.pdf(u, loc = mu, scale = np.sqrt(var) ) * \
        np.log(scipy.stats.multivariate_normal.pdf([u, x], mean = M, cov = C, allow_singular = True))

    #return f
    #print f(mu)
    std = np.sqrt(var)
    return scipy.integrate.quad(f, mu - w*std, mu + w*std)[0]
Project: kernel_goodness_of_fit    Author: karlnapf    | Project source | File source
def austerity(log_lik,log_density_prior, X,epsilon,batch_size=30,chain_size=10000, thinning=1, theta_t=np.random.randn()):
    A = [theta_t]
    N = X.shape[0]
    dimension=1
    if hasattr(theta_t, "__len__"):
        dimension = len(theta_t)

    global_evals = 0
    for i in range(chain_size*thinning-1):
        # if i % 1000 ==0:
        #     print( 100.0*i/(chain_size*thinning), ' %')
        theta_prime = np.random.randn(dimension)+theta_t

        u = np.random.rand()
        mu_0 = np.log(u)+log_density_prior(theta_t) -log_density_prior(theta_prime)
        mu_0 = mu_0/N

        accept,evals = approximate_MH_accept(mu_0, log_lik, X, batch_size, epsilon, theta_prime, theta_t, N)
        global_evals += evals
        if accept:
           theta_t = theta_prime

        A.append(theta_t)

    return np.array(A[::thinning]),global_evals
Project: seqhawkes    Author: mlukasik    | Project source | File source
def evaluate_stance(
        self,
        testN,
        testtimes,
        testinfecting_vec,
        testinfected_vec,
        testeventmemes,
        testW,
        testT,
        ):
        predictednode_vec = []

        for next_event_index in xrange(len(testtimes)):
            print 'considering event', next_event_index
            words = testW[next_event_index, :]

            predictions = []
            for label in set(self.node_vec):
                loglikelihood_term = 0
                loglikelihood_term += np.dot(words,
                        np.log(self.beta[label, :]))
                predictions.append((label, loglikelihood_term))
            predictednode_vec.append(max(predictions, key=lambda x: \
                    x[1])[0])
        return predictednode_vec
Project: seqhawkes    Author: mlukasik    | Project source | File source
def _beta_update_raw_tfidf(self):
        '''
        Run only once - it does not depend on other parameters.
        '''

        for nodeid in xrange(self.D):
            self.beta[nodeid] = self.W[self.node_vec == nodeid, :
                    ].sum(axis=0)
        for nodeid in xrange(self.D):
            for wordid in xrange(self.beta.shape[1]):
                docs_cnt = np.sum(self.W[self.node_vec == nodeid,
                                  wordid] >= 1)
                docs_cnt += 1  # smooth by adding one
                self.beta[nodeid][wordid] *= 1 + np.log(self.W.shape[0]
                        * 1. / docs_cnt)  # 1+ because we still want to keep words which always occur, though that probably never happens

        # Laplace smoothing to avoid zeros!

        self.beta += 1
        self._normalize_beta_rowwise()
        return self.beta
Project: dc_stat_think    Author: justinbois    | Project source | File source
def b_value(mags, mt, perc=[2.5, 97.5], n_reps=None):
    """Compute the b-value and optionally its confidence interval."""
    # Extract magnitudes above completeness threshold
    m = mags[mags >= mt]

    # Compute b-value
    b = (np.mean(m) - mt) * np.log(10)

    # Draw bootstrap replicates
    if n_reps is None:
        return b
    else:
        m_bs_reps = dcst.draw_bs_reps(m, np.mean, size=n_reps)

        # Compute b-value from replicates
        b_bs_reps = (m_bs_reps - mt) * np.log(10)

        # Compute confidence interval
        conf_int = np.percentile(b_bs_reps, perc)

        return b, conf_int
Project: STA141C    Author: clarkfitzg    | Project source | File source
def f(w, lamb):
    """
    Eq. (2) in problem 2

    Non-vectorized, slow
    """
    total = 0
    nrows = X.shape[0]
    for i in range(nrows):
        current = 1 + np.exp(-y[i] * X[i, ].dot(w))
        total += np.log(current)
    total += (lamb / 2) * w.dot(w)
    return total
Project: STA141C    Author: clarkfitzg    | Project source | File source
def f2(w, lamb):
    """
    Eq. (2) in problem 2

    Vectorized (no explicit loops), fast
    """
    yxTw = y * X.dot(w)
    firstpart = np.log(1 + np.exp(-yxTw))
    total = firstpart.sum()
    total += (lamb / 2) * w.dot(w)
    return total
Project: AutoML5    Author: djajetic    | Project source | File source
def pac_metric (solution, prediction, task='binary.classification'):
    ''' Probabilistic Accuracy based on log_loss metric. 
    We assume the solution is in {0, 1} and prediction in [0, 1].
    Otherwise, run normalize_array.''' 
    debug_flag=False
    [sample_num, label_num] = solution.shape
    if label_num==1: task='binary.classification'
    eps = 1e-15
    the_log_loss = log_loss(solution, prediction, task)
    # Compute the base log loss (using the prior probabilities)    
    pos_num = 1.* sum(solution) # float conversion!
    frac_pos = pos_num / sample_num # prior proba of positive class
    the_base_log_loss = prior_log_loss(frac_pos, task)
    # Alternative computation of the same thing (slower)    
    # Should always return the same thing except in the multi-label case
    # For which the analytic solution makes more sense
    if debug_flag:
        base_prediction = np.empty(prediction.shape)
        for k in range(sample_num): base_prediction[k,:] = frac_pos
        base_log_loss = log_loss(solution, base_prediction, task)  
        diff = np.array(abs(the_base_log_loss-base_log_loss))
        if len(diff.shape)>0: diff=max(diff)
        if(diff)>1e-10: 
            print('Arrggh {} != {}'.format(the_base_log_loss,base_log_loss))
    # Exponentiate to turn into an accuracy-like score.
    # In the multi-label case, we need to average AFTER taking the exp 
    # because it is an NL operation
    pac = mvmean(np.exp(-the_log_loss)) 
    base_pac = mvmean(np.exp(-the_base_log_loss))
    # Normalize: 0 for random, 1 for perfect    
    score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac))
    return score
Project: AutoML5    Author: djajetic    | Project source | File source
def log_loss(solution, prediction, task = 'binary.classification'):
    ''' Log loss for binary and multiclass. '''
    [sample_num, label_num] = solution.shape
    eps = 1e-15

    pred = np.copy(prediction) # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == 'multiclass.classification') and (label_num>1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k,:] /= sp.maximum (norma[k], eps) 
        # Make sure there is a single label active per line for multi-class classification
        sol = binarize_predictions(solution, task='multiclass.classification')
        # For the base prediction, this solution is ridiculous in the multi-label case

    # Bounding of predictions to avoid log(0),1/0,...
    pred = sp.minimum (1-eps, sp.maximum (eps, pred))
    # Compute the log loss    
    pos_class_log_loss = - mvmean(sol*np.log(pred), axis=0)
    if (task != 'multiclass.classification') or (label_num==1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = - mvmean((1-sol)*np.log(1-pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem, so we average.
        # The probabilities in one line do not add up to one.
        # log_loss = mvmean(log_loss) 
        # print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum
        # We return all the scores so we can normalize correctly later on
    else:
        # For the multiclass case the probabilities in one line add up one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss) 
        #print('multiclass {}'.format(log_loss))
    return log_loss
Project: AutoML5    Author: djajetic    | Project source | File source
def prior_log_loss(frac_pos, task = 'binary.classification'):
    ''' Baseline log loss. For multiple classes or labels, return the values for each column. '''
    eps = 1e-15   
    frac_pos_ = sp.maximum (eps, frac_pos)
    if (task != 'multiclass.classification'): # binary case
        frac_neg = 1-frac_pos
        frac_neg_ = sp.maximum (eps, frac_neg)
        pos_class_log_loss_ = - frac_pos * np.log(frac_pos_)
        neg_class_log_loss_ = - frac_neg * np.log(frac_neg_)
        base_log_loss = pos_class_log_loss_ + neg_class_log_loss_
        # base_log_loss = mvmean(base_log_loss)
        # print('binary {}'.format(base_log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum
        # We return all the scores so we can normalize correctly later on
    else: # multiclass case
        fp = frac_pos_ / sum(frac_pos_) # Need to renormalize the lines in multiclass case
        # Only ONE label is 1 in the multiclass case active for each line
        pos_class_log_loss_ = - frac_pos * np.log(fp)
        base_log_loss = np.sum(pos_class_log_loss_) 
    return base_log_loss

# sklearn implementations for comparison
Project: topically-driven-language-model    Author: jhlau    | Project source | File source
def sample(self, probs, temperature):
        if temperature == 0:
            return np.argmax(probs)

        probs = probs.astype(np.float64) #convert to float64 for higher precision
        probs = np.log(probs) / temperature
        probs = np.exp(probs) / math.fsum(np.exp(probs))
        return np.argmax(np.random.multinomial(1, probs, 1))

    #generate a sentence given conv_hidden
Project: onto-lstm    Author: pdasigi    | Project source | File source
def test(self, vocab_size, use_onto_lstm, S_ind_test=None, C_ind_test=None, hierarchical=False, base=2, oov_list=None):
    X_test = C_ind_test[:,:-1] if use_onto_lstm else S_ind_test[:,:-1] # remove the last words' hyps in all sentences
    Y_inds_test = S_ind_test[:,1:]
    if hierarchical:
      test_targets = self._factor_target_indices(Y_inds_test, vocab_size, base=base)
    else:
      test_targets = [self._make_one_hot(Y_inds_test, vocab_size)]
    print >>sys.stderr, "Evaluating model on test data"
    test_loss = self.model.evaluate(X_test, test_targets)
    print >>sys.stderr, "Test loss: %.4f"%test_loss
    if oov_list is not None:
      oov_inds = [self.dp.word_index[w] for w in oov_list]
      non_oov_Y_inds = numpy.copy(Y_inds_test)
      for ind in oov_inds:
        non_oov_Y_inds[non_oov_Y_inds == ind] = 0
      non_oov_test_targets = self._factor_target_indices(non_oov_Y_inds, vocab_size, base=base)
      non_oov_test_loss = self.model.evaluate(X_test, non_oov_test_targets)
      print >>sys.stderr, "Non-oov test loss: %.4f"%non_oov_test_loss
    factored_test_preds = [-((numpy.log(pred) * target).sum(axis=-1)) for pred, target in zip(self.model.predict(X_test), test_targets)]
    test_preds = sum(factored_test_preds)
    #non_null_probs = []
    #for test_pred, inds in zip(test_preds, Y_inds_test):
    #  wanted_probs = []
    #  for tp, ind in zip(test_pred, inds):
    #    if ind != 0:
    #      wanted_probs.append(tp)
    #  non_null_probs.append(wanted_probs)
    #return non_null_probs
    return test_preds
Project: pylspm    Author: lseman    | Project source | File source
def data_log_likelihood(self, dataSplit, coefficients, variances):

        log_likelihood = 0.0

        for k in range(self.num_components):

            coef_ = coefficients[k]

            Beta = coef_.ix[self.endoVar][self.endoVar]
            Gamma = coef_.ix[self.endoVar][self.exoVar]

            a_ = (np.dot(Beta, self.fscores[
                  self.endoVar].T) + np.dot(Gamma, self.fscores[self.exoVar].T))

            invert_ = np.linalg.inv(np.array(variances[k]))

            exponential = np.exp(-0.5 * np.dot(np.dot(a_.T, invert_), a_))

            den = (((2 * np.pi)**(self.Q / 2)) *
                   np.sqrt(np.linalg.det(variances[k])))
            probabilities = exponential[0] / den

            log_likelihood += np.log(probabilities).sum()

        print(log_likelihood)
        return log_likelihood
Project: pylspm    Author: lseman    | Project source | File source
def BTS(data):

    n = data.shape[0]
    p = data.shape[1]

    chi2 = -(n - 1 - (2 * p + 5) / 6) * \
        np.log(np.linalg.det(pd.DataFrame.corr(data)))
    df = p * (p - 1) / 2

    pvalue = scipy.stats.distributions.chi2.sf(chi2, df)

    return [chi2, pvalue]
Project: variational-text-tensorflow    Author: carpedm20    | Project source | File source
def build_model(self):
    self.x = tf.placeholder(tf.float32, [self.reader.vocab_size], name="input")
    self.x_idx = tf.placeholder(tf.int32, [None], name="x_idx")

    self.build_encoder()
    self.build_generator()

    # Kullback Leibler divergence
    self.e_loss = -0.5 * tf.reduce_sum(1 + self.log_sigma_sq - tf.square(self.mu) - tf.exp(self.log_sigma_sq))

    # Log likelihood
    self.g_loss = -tf.reduce_sum(tf.log(tf.gather(self.p_x_i, self.x_idx) + 1e-10))

    self.loss = self.e_loss + self.g_loss

    self.encoder_var_list, self.generator_var_list = [], []
    for var in tf.trainable_variables():
      if "encoder" in var.name:
        self.encoder_var_list.append(var)
      elif "generator" in var.name:
        self.generator_var_list.append(var)

    # optimizer for alternative update
    self.optim_e = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.e_loss, global_step=self.step, var_list=self.encoder_var_list)
    self.optim_g = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.g_loss, global_step=self.step, var_list=self.generator_var_list)

    # optimizer for one shot update
    self.optim = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.loss, global_step=self.step)

    _ = tf.scalar_summary("encoder loss", self.e_loss)
    _ = tf.scalar_summary("generator loss", self.g_loss)
    _ = tf.scalar_summary("total loss", self.loss)
Project: treecat    Author: posterior    | Project source | File source
def edge_logits(self):
        """Get edge log probabilities on the complete graph."""
Project: treecat    Author: posterior    | Project source | File source
def logprob(self, data):
        """Compute non-normalized log probabilies of many rows of data."""
Project: treecat    Author: posterior    | Project source | File source
def logprob(self, data):
        logprobs = np.stack(
            [server.logprob(data) for server in self._ensemble])
        logprobs = logsumexp(logprobs, axis=0)
        logprobs -= np.log(len(self._ensemble))
        assert logprobs.shape == (data.shape[0], )
        return logprobs
Project: treecat    Author: posterior    | Project source | File source
def edge_logits(self):
        """A [K]-shaped array of log odds of edges in the complete graph."""
        return self._server.edge_logits
Project: MMD-Variational-Autoencoder    Author: ShengjiaZhao    | Project source | File source
def make_model_path(name):
    log_path = os.path.join('log', name)
    if os.path.isdir(log_path):
        subprocess.call(('rm -rf %s' % log_path).split())
    os.makedirs(log_path)
    return log_path
Project: MMD-Variational-Autoencoder    Author: ShengjiaZhao    | Project source | File source
def compute_log_sum(val):
    min_val = np.min(val, axis=0, keepdims=True)
    return np.mean(min_val - np.log(np.mean(np.exp(-val + min_val), axis=0)))
Project: MKLMM    Author: omerwe    | Project source | File source
def getInitialHyps(self, X, C, y):
        self.logdetXX  = np.linalg.slogdet(C.T.dot(C))[1]

        hyp0_sig2e = [0.5*np.log(0.5*y.var())]
        Linreg = sklearn.linear_model.LinearRegression(fit_intercept=False, normalize=False, copy_X=False)
        Linreg.fit(C, y)
        hyp0_fixedEffects = Linreg.coef_        
        return hyp0_sig2e, hyp0_fixedEffects
Project: MKLMM    Author: omerwe    | Project source | File source
def rankRegions(self, X, C, y, pos, regionLength, reml=True):

        # get regions list
        regionsList = self.createRegionsList(pos, regionLength)

        #precompute log determinant of covariates
        XX = C.T.dot(C)
        [Sxx,Uxx]= la.eigh(XX)
        logdetXX  = np.log(Sxx).sum()

        #score each region
        betas = np.zeros(len(regionsList))
        for r_i, r in enumerate(regionsList):
            regionSize = len(r)

            if (self.verbose and r_i % 1000==0):
                print 'Testing region ' + str(r_i+1)+'/'+str(len(regionsList)),
                print 'with', regionSize, 'SNPs\t'

            s,U = self.eigenDecompose(X[:, np.array(r)], None)
            sig2g_kernel, sig2e_kernel, fixedEffects, ll = self.optSigma2(U, s, y, C, logdetXX, reml)
            betas[r_i] = ll

        return regionsList, betas


    ### this code is taken from the FastLMM package (see attached license)###