Python numpy module: reciprocal() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.reciprocal().
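
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the basic behaviour of numpy.reciprocal, including the integer-dtype caveat:

import numpy as np

x = np.array([1.0, 2.0, 4.0])
print(np.reciprocal(x))                                  # [1.   0.5  0.25]

# Caution: on integer input the element-wise 1/x is evaluated in integer
# arithmetic, so most entries truncate to 0. Cast to float if that is not wanted.
print(np.reciprocal(np.array([1, 2, 4])))                # [1 0 0]
print(np.reciprocal(np.array([1, 2, 4], dtype=float)))   # [1.   0.5  0.25]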

Project: histwords    Author: williamleif
def guess(representation, sims, xi, a, a_, b):
    sa = sims[xi[a]]
    sa_ = sims[xi[a_]]
    sb = sims[xi[b]]

    add_sim = -sa+sa_+sb
    if a in representation.wi:
        add_sim[representation.wi[a]] = 0
    if a_ in representation.wi:
        add_sim[representation.wi[a_]] = 0
    if b in representation.wi:
        add_sim[representation.wi[b]] = 0
    b_add = representation.iw[np.nanargmax(add_sim)]

    mul_sim = sa_*sb*np.reciprocal(sa+0.01)
    if a in representation.wi:
        mul_sim[representation.wi[a]] = 0
    if a_ in representation.wi:
        mul_sim[representation.wi[a_]] = 0
    if b in representation.wi:
        mul_sim[representation.wi[b]] = 0
    b_mul = representation.iw[np.nanargmax(mul_sim)]

    return b_add, b_mul
Project: histwords    Author: williamleif
def calc_pmi(counts, cds):
    """
    Calculates e^PMI; PMI without the log().
    """
    sum_w = np.array(counts.sum(axis=1))[:, 0]
    sum_c = np.array(counts.sum(axis=0))[0, :]
    if cds != 1:
        sum_c = sum_c ** cds
    sum_total = sum_c.sum()
    sum_w = np.reciprocal(sum_w)
    sum_c = np.reciprocal(sum_c)

    pmi = csr_matrix(counts)
    pmi = multiply_by_rows(pmi, sum_w)
    pmi = multiply_by_columns(pmi, sum_c)
    pmi = pmi * sum_total
    return pmi
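
The reciprocals here implement e^PMI = P(w,c) / (P(w) P(c)). A small dense sketch of the same idea (my own illustration with made-up counts, not part of the histwords source, assuming cds = 1):

import numpy as np

counts = np.array([[4., 0.],
                   [2., 2.]])
sum_w = counts.sum(axis=1)      # word (row) marginals
sum_c = counts.sum(axis=0)      # context (column) marginals
sum_total = counts.sum()

# e^PMI(w, c) = P(w, c) / (P(w) * P(c)) = counts * total / (row_sum * col_sum)
epmi = counts * np.reciprocal(sum_w)[:, None] * np.reciprocal(sum_c)[None, :] * sum_total
print(epmi)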
Project: PyDMD    Author: mathLab
def _build_lowrank_op(U, s, V, Y):
        """
        Private method that computes the lowrank operator from the singular
        value decomposition of matrix X and the matrix Y.

        .. math::

            \\mathbf{\\tilde{A}} =
                \\mathbf{U}^* \\mathbf{Y} \\mathbf{X}^\\dagger \\mathbf{U} =
                \\mathbf{U}^* \\mathbf{Y} \\mathbf{V} \\mathbf{S}^{-1}

        :param numpy.ndarray U: 2D matrix that contains the left-singular
            vectors of X, stored by column.
        :param numpy.ndarray s: 1D array that contains the singular values of X.
        :param numpy.ndarray V: 2D matrix that contains the right-singular
            vectors of X, stored by row.
        :param numpy.ndarray Y: input matrix Y.
        :return: the lowrank operator
        :rtype: numpy.ndarray
        """
        return U.T.conj().dot(Y).dot(V) * np.reciprocal(s)
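
The trailing "* np.reciprocal(s)" applies S^{-1}: broadcasting the reciprocal singular values over the last axis divides each column by the corresponding value. A small standalone check (my own sketch with random data, not PyDMD code):

import numpy as np

rng = np.random.default_rng(0)
M = rng.standard_normal((4, 3))
s = np.array([3.0, 2.0, 0.5])

# Scaling column j by 1/s[j] is the same as right-multiplying by diag(s)^-1.
assert np.allclose(M * np.reciprocal(s), M @ np.diag(np.reciprocal(s)))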
Project: describe    Author: SINGROUP
def get_inverse_distance_matrix(self):
        """Calculates the inverse distance matrix A defined as:

            A_ij = 1/|r_i - r_j|

        For periodic systems the distance of an atom from itself is the
        smallest displacement of the atom from one of its periodic copies, and
        the distance between two different atoms is the distance between their
        two closest copies.

        Returns:
            np.array: Symmetric 2D matrix containing the pairwise inverse
            distances.
        """
        if self._inverse_distance_matrix is None:
            distance_matrix = self.get_distance_matrix()
            with np.errstate(divide='ignore'):
                inv_distance_matrix = np.reciprocal(distance_matrix)
            self._inverse_distance_matrix = inv_distance_matrix
        return self._inverse_distance_matrix
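
A standalone sketch of the same pattern with hypothetical coordinates (not the describe package itself): the zero self-distances turn into inf under np.reciprocal, which the errstate context silences and which would typically be masked afterwards.

import numpy as np

positions = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
distance_matrix = np.linalg.norm(positions[:, None, :] - positions[None, :, :], axis=-1)

with np.errstate(divide='ignore'):
    inv_distance_matrix = np.reciprocal(distance_matrix)
inv_distance_matrix[np.isinf(inv_distance_matrix)] = 0.0  # zero out the diagonal self-terms
print(inv_distance_matrix)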
Project: elfi    Author: elfi-dev
def pdf(cls, x, a, b):
        """Density function at `x`.

        Parameters
        ----------
        x : float or array-like
        a : float or array-like
        b : float or array-like

        Returns
        -------
        np.array

        """
        with np.errstate(divide='ignore'):
            p = np.where((x < np.exp(a)) | (x > np.exp(b)), 0, np.reciprocal(x))
            p /= (b - a)  # normalize
        return p
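
This is the density of a log-uniform (reciprocal) distribution on [e^a, e^b]. A quick numerical sanity check (my own sketch, not elfi code) confirms it integrates to roughly 1:

import numpy as np

a, b = 0.0, 2.0
x = np.linspace(np.exp(a), np.exp(b), 200001)
p = np.reciprocal(x) / (b - a)          # 1 / (x * (b - a)) on the support
print(np.sum(p[:-1] * np.diff(x)))      # ~1.0 (left Riemann sum)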
Project: power_flow    Author: BeierZhu
def XB(Y,index_PQ, index_P, n_PQ, n_P):
    case_number, _ = np.shape(Y)
    G, B = iterator.util.get_G_B(Y)
    X = np.zeros((case_number, case_number))
    B_p = np.zeros((n_P,n_P))
    B_pp = np.zeros((n_PQ,n_PQ))
    for i in xrange(case_number):
        for j in xrange(case_number):
            if LA.norm(Y[i][j]) > 1e-5 and i != j:
                X[i][j] = np.reciprocal(np.imag(np.reciprocal(Y[i][j])))

    for i in xrange(case_number):
        for j in xrange(case_number):
            if i != j:
                X[i][i] -= X[i][j]

    for i in xrange(0, n_P):
        for j in xrange(0, n_P):
            B_p[i][j] = X[index_P[i]][index_P[j]]
    #---------------------------------------------------
    for i in xrange(0, n_PQ):
        for j in xrange(0, n_PQ):
            B_pp[i][j] = B[index_PQ[i]][index_PQ[j]]

    return B_p, B_pp
Project: clinspell    Author: clips
def comp_sum(vectors, reciprocal=False):
        """
        :param vectors: vectors to be composed
        :param reciprocal: if True, apply reciprocal weighting
        :return: composed vector representation
        """

        if not reciprocal:
            composed_vector = np.sum(vectors, axis=0)

        else:
            weight_vector = np.reciprocal(np.arange(1., len(vectors) + 1))
            weighted_vectors = []
            for i, weight in enumerate(weight_vector):
                weighted_vectors.append(vectors[i] * weight)
            composed_vector = reduce(lambda x, y: x + y, weighted_vectors)

        return composed_vector
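
The reciprocal weighting used here (and in comp_mult and comp_max below) is a harmonic weight vector, so earlier vectors contribute more to the composition. Illustration only, not part of clinspell:

import numpy as np

# For five input vectors the weights are 1, 1/2, 1/3, 1/4, 1/5.
print(np.reciprocal(np.arange(1., 6.)))   # [1.  0.5  0.33333333  0.25  0.2]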
Project: clinspell    Author: clips
def comp_mult(vectors, reciprocal=False):
        """
        :param vectors: vectors to be composed
        :param reciprocal: if True, apply reciprocal weighting
        :return: composed vector representation
        """

        if not reciprocal:
            composed_vector = reduce(lambda x, y: x * y, vectors)

        else:
            weight_vector = np.reciprocal(np.arange(1., len(vectors) + 1))
            weighted_vectors = []
            for i, weight in enumerate(weight_vector):
                weighted_vectors.append(vectors[i] * weight)
            composed_vector = reduce(lambda x, y: x * y, weighted_vectors)

        return composed_vector
Project: clinspell    Author: clips
def comp_max(vectors, reciprocal=False):
        """
        :param vectors: vectors to be composed
        :param reciprocal: if True, apply reciprocal weighting
        :return: composed vector representation
        """

        if not reciprocal:
            composed_vector = np.amax(vectors, axis=0)

        else:
            weight_vector = np.reciprocal(np.arange(1., len(vectors) + 1))
            weighted_vectors = []
            for i, weight in enumerate(weight_vector):
                weighted_vectors.append(vectors[i] * weight)
            composed_vector = np.amax(weighted_vectors, axis=0)

        return composed_vector
Project: TrustBasedRecommendation    Author: AKIRA85
def calcuate_similarity(pivot_table, user_data, product_data, i, j, w_lambda):
    if i==j:
        return 0
    normalize_freq = np.max(product_data['count'].values)
    common = (pivot_table[i]*pivot_table[j]).nonzero()

    rating_i = pivot_table[i][common[0]]
    rating_j = pivot_table[j][common[0]]
    rating_i = rating_i - user_data.iloc[i, 0]
    rating_j = rating_j - user_data.iloc[j, 0]
    variance = rating_i*rating_j

    reputation = product_data.iloc[common[0], 0].as_matrix()/5
    frequency = product_data.iloc[common[0], 2]/normalize_freq
    val = np.sum(np.sqrt(w_lambda*np.square(np.reciprocal(reputation))+(1-w_lambda)*np.square(np.reciprocal(frequency)))*variance)
    return val/ ( max(user_data.iloc[i, 1], 1)*max(user_data.iloc[j, 1], 1) )

#Read ratings data
Project: TrustBasedRecommendation    Author: AKIRA85
def calcuate_similarity(pivot_table, user_data, product_data, i, j, w_lambda):
    if i==j:
        return 0
    normalize_freq = np.max(product_data['count'].values)
    common = (pivot_table[i]*pivot_table[j]).nonzero()

    rating_i = pivot_table[i][common[0]]
    rating_j = pivot_table[j][common[0]]
    rating_i = rating_i - user_data.iloc[i, 0]
    rating_j = rating_j - user_data.iloc[j, 0]
    variance = rating_i*rating_j

    reputation = product_data.iloc[common[0], 0].as_matrix()/5
    frequency = product_data.iloc[common[0], 2]/normalize_freq
    val = np.sum(np.sqrt(w_lambda*np.square(np.reciprocal(reputation))+(1-w_lambda)*np.square(np.reciprocal(frequency))))
    return val/ ( max(user_data.iloc[i, 1], 1)*max(user_data.iloc[j, 1], 1) )

#create pandas dataframe
# df = pd.read_csv('dataset/ratings_Electronics_compressed.csv', 
#   header=None, 
#   names=['reviewerID', 'productID', 'overall', 'unixReviewTime'], 
#   sep=',', 
#   dtype={'reviewerID':int, 'productID':int, 'overall':int, 'unixReviewTime':int})
Project: TrustBasedRecommendation    Author: AKIRA85
def calcuate_similarity(pivot_table, user_data, product_data, i, j, w_lambda):
    if i==j:
        return 0
    normalize_freq = np.max(product_data['count'].values)
    common = (pivot_table[i]*pivot_table[j]).nonzero()

    rating_i = pivot_table[i][common[0]]
    rating_j = pivot_table[j][common[0]]
    rating_i = rating_i - user_data.iloc[i, 0]
    rating_j = rating_j - user_data.iloc[j, 0]
    variance = rating_i*rating_j

    reputation = product_data.iloc[common[0], 0].as_matrix()/5
    frequency = product_data.iloc[common[0], 2]/normalize_freq
    val = np.sum(np.sqrt(w_lambda*np.square(np.reciprocal(reputation))+(1-w_lambda)*np.square(np.reciprocal(frequency)))*variance)
    return val/ ( max(user_data.iloc[i, 1], 1)*max(user_data.iloc[j, 1], 1) )

#Read ratings data
Project: TrustBasedRecommendation    Author: AKIRA85
def calcuate_similarity(pivot_table, user_data, product_data, i, j, w_lambda):
    if i==j:
        return 0
    normalize_freq = np.max(product_data['count'].values)
    common = (pivot_table[i]*pivot_table[j]).nonzero()

    rating_i = pivot_table[i][common[0]]
    rating_j = pivot_table[j][common[0]]
    rating_i = rating_i - user_data.iloc[i, 0]
    rating_j = rating_j - user_data.iloc[j, 0]
    variance = rating_i*rating_j

    reputation = product_data.iloc[common[0], 0].as_matrix()/5
    frequency = product_data.iloc[common[0], 2]/normalize_freq
    val = np.sum(np.sqrt(w_lambda*np.square(np.reciprocal(reputation))+(1-w_lambda)*np.square(np.reciprocal(frequency)))*variance)
    return val/ ( max(user_data.iloc[i, 1], 1)*max(user_data.iloc[j, 1], 1) )

#Read ratings data
Project: TrustBasedRecommendation    Author: AKIRA85
def calcuate_similarity(pivot_table, user_data, product_data, i, j, w_lambda):
    if i==j:
        return 0
    normalize_freq = np.max(product_data['count'].values)
    common = (pivot_table[i]*pivot_table[j]).nonzero()

    rating_i = pivot_table[i][common[0]]
    rating_j = pivot_table[j][common[0]]
    rating_i = rating_i - user_data.iloc[i, 0]
    rating_j = rating_j - user_data.iloc[j, 0]
    variance = rating_i*rating_j

    reputation = product_data.iloc[common[0], 0].as_matrix()/5
    frequency = product_data.iloc[common[0], 2]/normalize_freq
    val = np.sum(np.sqrt(w_lambda*np.square(np.reciprocal(reputation))+(1-w_lambda)*np.square(np.reciprocal(frequency)))*variance)
    return val/ ( max(user_data.iloc[i, 1], 1)*max(user_data.iloc[j, 1], 1) )

#create pandas dataframe
# df = pd.read_csv('dataset/ratings_Electronics_compressed.csv', 
#   header=None, 
#   names=['reviewerID', 'productID', 'overall', 'unixReviewTime'], 
#   sep=',', 
#   dtype={'reviewerID':int, 'productID':int, 'overall':int, 'unixReviewTime':int})
Project: TrustBasedRecommendation    Author: AKIRA85
def calcuate_similarity(pivot_table, user_data, product_data, i, j, w_lambda):
    if i==j:
        return 0
    normalize_freq = np.max(product_data['count'].values)
    common = (pivot_table[i]*pivot_table[j]).nonzero()

    rating_i = pivot_table[i][common[0]]
    rating_j = pivot_table[j][common[0]]
    rating_i = rating_i - user_data.iloc[i, 0]
    rating_j = rating_j - user_data.iloc[j, 0]
    variance = rating_i*rating_j

    reputation = product_data.iloc[common[0], 0].as_matrix()/5
    frequency = product_data.iloc[common[0], 2]/normalize_freq
    val = np.sum(np.sqrt(w_lambda*np.square(np.reciprocal(reputation))+(1-w_lambda)*np.square(np.reciprocal(frequency)))*variance)
    return val/ ( max(user_data.iloc[i, 1], 1)*max(user_data.iloc[j, 1], 1) )

#Read ratings data
Project: torchsample    Author: ncullen93
def _butterworth_filter(rows, cols, thresh, order):
    # X and Y matrices with ranges normalised to +/- 0.5
    array1 = np.ones(rows)
    array2 = np.ones(cols)
    array3 = np.arange(1,rows+1)
    array4 = np.arange(1,cols+1)

    x = np.outer(array1, array4)
    y = np.outer(array3, array2)

    x = x - float(cols/2) - 1
    y = y - float(rows/2) - 1

    x = x / cols
    y = y / rows

    radius = np.sqrt(np.square(x) + np.square(y))

    matrix1 = radius/thresh
    matrix2 = np.power(matrix1, 2*order)
    f = np.reciprocal(1 + matrix2)

    return f
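
The last step is the standard Butterworth low-pass response 1 / (1 + (r / thresh)^(2 * order)): the gain is 1 at the centre and 0.5 at the cutoff radius. A small standalone check (my own sketch, not torchsample code):

import numpy as np

thresh, order = 0.3, 2
radius = np.array([0.0, 0.15, 0.3, 0.6])
gain = np.reciprocal(1 + np.power(radius / thresh, 2 * order))
print(gain)   # roughly [1.0, 0.94, 0.5, 0.06]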
Project: GMM_using_EM    Author: go2chayan
def Mstep(dataSet,W):
    (N, M) = np.shape(dataSet)
    K = np.size(W,1)
    # Each column of MU represents the mean of a cluster. 
    # So, for K clusters, there will be K columns of MU
    # Each column,
    # mu_k = (1/N_k)*sum_{1}^{N}{w_{ik}*x_i} 
    N_k = np.sum(W,0)
    Alpha = N_k/np.sum(N_k)
    Mu = dataSet.T.dot(W).dot(np.diag(np.reciprocal(N_k)))
    # SIGMA is a 3-dimensional matrix of size MxMxK. 
    # It contains K covariances for each cluster
    Sigma = np.zeros([M,M,K])
    for k in range(K):
        datMeanSub = dataSet.T - Mu[0:,k][None].T.dot(np.ones([1,N]))
        Sigma[:,:,k] = (datMeanSub.dot(np.diag(W[0:,k])).dot(datMeanSub.T))/N_k[k]
    return Alpha,Mu,Sigma
Project: GMM_using_EM    Author: go2chayan
def Estep(dataSet,Alpha,Mu,Sigma):
    # We will calculate the membership weight matrix W here. W is an
    # NxK matrix where (i,j)th element represents the probability of
    # ith data point to be a member of jth cluster given the parameters
    # Alpha, Mu and Sigma
    N = np.size(dataSet,0)
    K = np.size(Alpha)
    W = np.zeros([N,K])
    for k in range(K):
        for i in range(N):
            W[i,k] = Alpha[k]*norm_pdf_multivariate(dataSet[i,:][None].T, \
                     Mu[:,k][None].T,Sigma[:,:,k])
    # Normalize W row-wise because each row represents a pdf. In other words,
    # probability of a point to be any one of the K clusters is equal to 1.
    W = W*np.reciprocal(np.sum(W,1)[None].T)
    return W
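
The final line row-normalizes W by multiplying with the reciprocal of each row sum, which is equivalent to dividing by the row sums. A small equivalence check (illustrative only, not part of the GMM code):

import numpy as np

W = np.array([[1.0, 3.0],
              [2.0, 2.0]])
a = W * np.reciprocal(np.sum(W, 1)[None].T)
b = W / W.sum(axis=1, keepdims=True)
assert np.allclose(a, b)   # each row of the result sums to 1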
Project: ngram2vec    Author: zhezhaoa
def guess(representation, sims, xi, a, a_, b):
    sa = sims[xi[a]]
    sa_ = sims[xi[a_]]
    sb = sims[xi[b]]

    add_sim = -sa+sa_+sb
    if a in representation.wi:
        add_sim[representation.wi[a]] = 0
    if a_ in representation.wi:
        add_sim[representation.wi[a_]] = 0
    if b in representation.wi:
        add_sim[representation.wi[b]] = 0
    b_add = representation.iw[np.nanargmax(add_sim)]

    mul_sim = sa_*sb*np.reciprocal(sa+0.01)
    if a in representation.wi:
        mul_sim[representation.wi[a]] = 0
    if a_ in representation.wi:
        mul_sim[representation.wi[a_]] = 0
    if b in representation.wi:
        mul_sim[representation.wi[b]] = 0
    b_mul = representation.iw[np.nanargmax(mul_sim)]

    return b_add, b_mul
Project: Parallel-SGD    Author: angadgill
def predict_proba(self, X):
        """Estimate probability.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        prob = self.decision_function(X)
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if len(self.classes_) == 2:  # binary case
            return np.column_stack([1 - prob, prob])
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
Project: Parallel-SGD    Author: angadgill
def _predict_proba_lr(self, X):
        """Probability estimation for OvR logistic regression.

        Positive class probabilities are computed as
        1. / (1. + np.exp(-self.decision_function(X)));
        multiclass is handled by normalizing that over all classes.
        """
        prob = self.decision_function(X)
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if prob.ndim == 1:
            return np.vstack([1 - prob, prob]).T
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
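
Both predict_proba variants above compute the logistic sigmoid entirely in place: negate the scores, exponentiate, add 1, and take the reciprocal, reusing the same buffer at every step. A standalone sketch of the idiom (with made-up scores, not scikit-learn's actual decision values):

import numpy as np

z = np.array([-2.0, 0.0, 3.0])   # hypothetical decision scores

prob = z.copy()
prob *= -1
np.exp(prob, out=prob)            # exp(-z)
prob += 1                         # 1 + exp(-z)
np.reciprocal(prob, out=prob)     # 1 / (1 + exp(-z))

assert np.allclose(prob, 1.0 / (1.0 + np.exp(-z)))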
Project: SLAM-Robot_Simu    Author: takuyani
def __resampling(self, px, pw):
        '''Resampling step.
            Resamples the particle set when the effective sample size (ESS)
            falls below the configured threshold, using systematic resampling.
        Args:
            px: particle state matrix
            pw: particle weight vector
        Returns:
            px: resampled particle states
            pw: reset particle weights
        '''
        ess = float(np.reciprocal(pw @ pw.T))
        if ess < self.__ESS_TH:
            pw_cum = np.cumsum(pw)
            base_id = np.arange(0.0, 1.0, self.__NP_RECIP)
            ofs = np.random.rand() * self.__NP_RECIP  # random offset for systematic resampling
            resample_id = base_id + ofs
            px_temp = np.copy(px)
            idx = 0
            for i in range(self.__NP):
                while resample_id[i] > pw_cum[idx]:
                    idx += 1
                px[:, i] = px_temp[:, idx]
            pw = np.copy(self.__pw_ini)  # reset weights to their initial values

        return px, pw
Project: go-NN    Author: TheDuck314
def compute_svd_normalization(samples, Ndiscard, max_rescale):
    # S is list of singular values in descending order
    # Each row of V is a list of the weights of the features in a given principal component
    centered_samples = samples - samples.mean(axis=0) # subtract columnwise means
    U, S, V = np.linalg.svd(centered_samples, full_matrices=False)
    Nsamp = samples.shape[0]
    component_stddevs = S / math.sqrt(Nsamp)

    print "singular values ="
    print S
    print "component standard deviations ="
    print component_stddevs
    print "V matrix ="
    print V

    Nfeat = samples.shape[1]
    rescaling_factors = np.minimum(np.reciprocal(component_stddevs[:Nfeat-Ndiscard]), max_rescale)
    whitening_matrix = np.dot(V[:Nfeat-Ndiscard].T, np.diag(rescaling_factors))

    print "Ndiscard =", Ndiscard
    print "max_rescale =", max_rescale
    print "rescaling_factors ="
    print rescaling_factors
    print "whitening_matrix ="
    print repr(whitening_matrix)
Project: histwords    Author: williamleif
def normalize(self):
        m2 = self.m.copy()
        m2.data **= 2
        norm = np.reciprocal(np.sqrt(np.array(m2.sum(axis=1))[:, 0]))
        normalizer = dok_matrix((len(norm), len(norm)))
        normalizer.setdiag(norm)
        self.m = normalizer.tocsr().dot(self.m)
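
A dense illustration of the same row normalization (the scipy.sparse machinery omitted; my own sketch, not histwords code): each row is scaled by the reciprocal of its Euclidean norm, leaving unit-norm rows.

import numpy as np

m = np.array([[3.0, 4.0],
              [0.5, 0.0]])
norm = np.reciprocal(np.sqrt((m ** 2).sum(axis=1)))
normalized = m * norm[:, None]
print(np.linalg.norm(normalized, axis=1))   # [1. 1.]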
Project: radar    Author: amoose136
def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                assert_almost_equal(np.reciprocal(inp2),
                                    np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                np.add(inp1, 1, out=out)
                assert_almost_equal(out, exp1 + 1, err_msg=msg)
                np.add(1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)
Project: PyDMD    Author: mathLab
def _eig_from_lowrank_op(Atilde, Y, U, s, V, exact):
        """
        Private method that computes eigenvalues and eigenvectors of the
        high-dimensional operator from the low-dimensional operator and the
        input matrix.

        :param numpy.ndarray Atilde: the lowrank operator.
        :param numpy.ndarray Y: input matrix Y.
        :param numpy.ndarray U: 2D matrix that contains the left-singular
            vectors of X, stored by column.
        :param numpy.ndarray s: 1D array that contains the singular values of X.
        :param numpy.ndarray V: 2D matrix that contains the right-singular
            vectors of X, stored by row.
        :param bool exact: if True, the exact modes are computed; otherwise,
            the projected ones are computed.
        :return: eigenvalues, eigenvectors
        :rtype: numpy.ndarray, numpy.ndarray
        """
        lowrank_eigenvalues, lowrank_eigenvectors = np.linalg.eig(Atilde)

        # Compute the eigenvectors of the high-dimensional operator
        if exact:
            eigenvectors = (
                (Y.dot(V) * np.reciprocal(s)).dot(lowrank_eigenvectors)
            )
        else:
            eigenvectors = U.dot(lowrank_eigenvectors)

        # The eigenvalues are the same
        eigenvalues = lowrank_eigenvalues

        return eigenvalues, eigenvectors
Project: describe    Author: SINGROUP
def sine_matrix(self, system):
        """Creates the Sine matrix for the given system.
        """
        # Cell and inverse cell
        B = system.get_cell()
        B_inv = system.get_cell_inverse()

        # Difference vectors in tensor 3D-tensor-form
        diff_tensor = system.get_displacement_tensor()

        # Calculate phi
        arg_to_sin = np.pi * np.dot(diff_tensor, B_inv)
        phi = np.linalg.norm(np.dot(np.sin(arg_to_sin)**2, B), axis=2)

        with np.errstate(divide='ignore'):
            phi = np.reciprocal(phi)

        # Calculate Z_i*Z_j
        q = system.get_initial_charges()
        qiqj = q[None, :]*q[:, None]
        np.fill_diagonal(phi, 0)

        # Multiply by charges
        smat = qiqj*phi

        # Set diagonal
        np.fill_diagonal(smat, 0.5 * q ** 2.4)

        return smat
Project: ngraph    Author: NervanaSystems
def test_reciprocal(input_tensor):
    """TODO."""
    p_u = input_tensor
    u = rng.uniform(.1, 5.0, p_u.axes)

    rec_u_np = np.reciprocal(u)
    rec_u = ng.reciprocal(p_u)

    with ExecutorFactory() as ex:
        rec_u_graph = ex.executor(rec_u, p_u)(u)
    ng.testing.assert_allclose(rec_u_np, rec_u_graph)
Project: ngraph    Author: NervanaSystems
def test_reciprocal_derivative(input_tensor):
    """TODO."""
    p_u = input_tensor

    delta = .001

    u = rng.uniform(.1, 5.0, p_u.axes)

    rec_u = ng.reciprocal(p_u)

    check_derivative(rec_u, p_u, delta, u, atol=1e-2, rtol=1e-2)
Project: ngraph    Author: NervanaSystems
def test_variance_sqrt_inverse(input_tensor):
    inputs = input_tensor
    targets = ng.placeholder(inputs.axes)

    epsilon = 1e-3

    inp_stat = ng.reciprocal(
        ng.sqrt(
            ng.variance(inputs, reduction_axes=inputs.axes.batch_axes()) + epsilon
        )
    )
    err = ng.sum(inp_stat - targets, out_axes=())
    d_inputs = ng.deriv(err, inputs)
    with executor([err, d_inputs], inputs, targets) as comp_func:

        input_value = rng.uniform(-1, 1, inputs.axes)
        target_value = rng.uniform(-1, 1, targets.axes)
        ng_f_res, ng_b_res = comp_func(input_value, target_value)

        npv = np.var(input_value, axis=1, keepdims=True) + epsilon
        np_f_res = 1.0 / np.sqrt(npv)

        npv_delta = 2 * (input_value - np.mean(input_value, axis=1, keepdims=True))

        np_b_res = - 0.5 * np_f_res / npv * npv_delta

        np_f_res = np.sum(np_f_res - target_value)

        ng.testing.assert_allclose(np_f_res, ng_f_res, atol=1e-4, rtol=1e-4)
        ng.testing.assert_allclose(np_b_res, ng_b_res, atol=1e-4, rtol=1e-4)
Project: ngraph    Author: NervanaSystems
def test_reciprocal(input_data):
    expected_output = np.reciprocal(input_data)
    node = onnx.helper.make_node('Reciprocal', inputs=['x'], outputs=['y'])
    ng_results = convert_and_calculate(node, [input_data], [expected_output])
    assert np.allclose(ng_results, [expected_output])
Project: complex_tf    Author: woodshop
def testCplxReciprocalGPU(self):
        shapes = [(5,4,3), (5,4), (5,), (1,)]
        for sh in shapes:
            x = ((np.random.randn(*sh) +
                  1j*np.random.randn(*sh)).astype(np.complex64))
            self._compareGpu(x, np.reciprocal, tf.reciprocal)
Project: complex_tf    Author: woodshop
def testCplxReciprocalGradGPU(self):
        shapes = [(5,4,3), (5,4), (5,), (1,)]
        for sh in shapes:
            x = ((np.random.randn(*sh) +
                  1j*np.random.randn(*sh)).astype(np.complex64))
            self._compareGpuGrad(x, np.reciprocal, tf.reciprocal)
Project: krpcScripts    Author: jwvanderbeck
def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                assert_almost_equal(np.reciprocal(inp2),
                                    np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                np.add(inp1, 1, out=out)
                assert_almost_equal(out, exp1 + 1, err_msg=msg)
                np.add(1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)
Project: power_flow    Author: BeierZhu
def BX(Y,index_PQ, index_P, n_PQ, n_P):
    case_number, _ = np.shape(Y)
    Y_p = Y.copy()
    X_p = np.zeros((case_number, case_number))
    B_p = np.zeros((n_P,n_P))
    B_pp = np.zeros((n_PQ,n_PQ))

    #-------------------------------------------------
    for i in xrange(case_number):
        Y_p[i][i] = complex(0,0)
        for j in xrange(case_number):
            if i != j:
                Y_p[i][i] -= Y_p[i][j]
    B = np.imag(Y_p)
    for i in xrange(n_P):
        for j in xrange(n_P):
            B_p[i][j] = B[index_P[i]][index_P[j]]
    #-------------------------------------------------
    g_b_round = np.zeros(case_number)
    for i in xrange(case_number):
        a = np.sum(Y[i])
        if LA.norm(a) > 1e-5:
            g_b_round[i] = np.reciprocal(np.imag(np.reciprocal(a)))

    for i in xrange(case_number):
        for j in xrange(case_number):
            if LA.norm(Y[i][j]) > 1e-5 and i!=j:
                X_p[i][j] = np.reciprocal(np.imag(np.reciprocal(Y[i][j])))
    for i in xrange(case_number):
        X_p[i][i] = g_b_round[i]
        for j in xrange(case_number):
            if i != j:
                X_p[i][i] -= X_p[i][j]
    for i in xrange(0, n_PQ):
        for j in xrange(0, n_PQ):
            B_pp[i][j] = X_p[index_PQ[i]][index_PQ[j]]

    return B_p, - B_pp

# Stott ================================================================================================================
# Stott Original--------------------------------------------------------------------------------------------------------
Project: power_flow    Author: BeierZhu
def XB(Y,index_PQ, index_P, n_PQ, n_P):
    case_number, _ = np.shape(Y)
    _, B = util.get_G_B(Y)
    X = np.zeros((case_number, case_number))
    B_p = np.zeros((n_P,n_P))
    B_pp = np.zeros((n_PQ,n_PQ))
    for i in xrange(case_number):
        for j in xrange(case_number):
            if LA.norm(Y[i][j]) > 1e-5 and i != j:
                X[i][j] = np.reciprocal(np.imag(np.reciprocal(Y[i][j])))

    for i in xrange(case_number):
        for j in xrange(case_number):
            if i != j:
                X[i][i] -= X[i][j]

    for i in xrange(0, n_P):
        for j in xrange(0, n_P):
            B_p[i][j] = X[index_P[i]][index_P[j]]
    #---------------------------------------------------
    for i in xrange(0, n_PQ):
        for j in xrange(0, n_PQ):
            B_pp[i][j] = B[index_PQ[i]][index_PQ[j]]

    return - B_p, B_pp

# Stott count r in B'---------------------------------------------------------------------------------------------------
Project: power_flow    Author: BeierZhu
def XB_ground(Y,index_PQ, index_P, n_PQ, n_P):
    case_number, _ = np.shape(Y)
    _, B = util.get_G_B(Y)
    X_p = np.zeros((case_number, case_number))
    B_p = np.zeros((n_P, n_P))
    B_pp = np.zeros((n_PQ, n_PQ))
    g_b_round = np.zeros(case_number)
    for i in xrange(case_number):
        a = np.sum(Y[i])
        if LA.norm(a) > 1e-5:
            g_b_round[i] = np.reciprocal(np.imag(np.reciprocal(a)))

    for i in xrange(case_number):
        for j in xrange(case_number):
            if LA.norm(Y[i][j]) > 1e-5 and i!=j:
                X_p[i][j] = np.reciprocal(np.imag(np.reciprocal(Y[i][j])))
    for i in xrange(case_number):
        X_p[i][i] = g_b_round[i]
        for j in xrange(case_number):
            if i != j:
                X_p[i][i] -= X_p[i][j]

    for i in xrange(0, n_P):
        for j in xrange(0, n_P):
            B_p[i][j] = X_p[index_P[i]][index_P[j]]
    # ---------------------------------------------------
    for i in xrange(0, n_PQ):
        for j in xrange(0, n_PQ):
            B_pp[i][j] = B[index_PQ[i]][index_PQ[j]]

    return - B_p, B_pp

# Stott ================================================================================================================
# XX--------------------------------------------------------------------------------------------------------------------
Project: power_flow    Author: BeierZhu
def XX(Y,index_PQ, index_P, n_PQ, n_P):
    case_number, _ = np.shape(Y)
    X_p = np.zeros((case_number, case_number))
    B_p = np.zeros((n_P,n_P))
    B_pp = np.zeros((n_PQ,n_PQ))
    for i in xrange(case_number):
        for j in xrange(case_number):
            if LA.norm(Y[i][j]) > 1e-5 and i != j:
                X_p[i][j] = np.reciprocal(np.imag(np.reciprocal(Y[i][j])))

    for i in xrange(case_number):
        for j in xrange(case_number):
            if i != j:
                X_p[i][i] -= X_p[i][j]

    for i in xrange(0, n_P):
        for j in xrange(0, n_P):
            B_p[i][j] = X_p[index_P[i]][index_P[j]]
    #-------------------------------------------------
    g_b_round = np.zeros(case_number)
    for i in xrange(case_number):
        a = np.sum(Y[i])
        if LA.norm(a) > 1e-5:
            g_b_round[i] = np.reciprocal(np.imag(np.reciprocal(a)))

    for i in xrange(case_number):
        for j in xrange(case_number):
            if LA.norm(Y[i][j]) > 1e-5 and i!=j:
                X_p[i][j] = np.reciprocal(np.imag(np.reciprocal(Y[i][j])))
    for i in xrange(case_number):
        X_p[i][i] = g_b_round[i]
        for j in xrange(case_number):
            if i != j:
                X_p[i][i] -= X_p[i][j]

    for i in xrange(0, n_PQ):
        for j in xrange(0, n_PQ):
            B_pp[i][j] = X_p[index_PQ[i]][index_PQ[j]]
    return - B_p, - B_pp
Project: power_flow    Author: BeierZhu
def BX(Y,index_PQ, index_P, n_PQ, n_P):
    case_number, _ = np.shape(Y)
    Y_p = Y.copy()
    # print Y
    X_p = np.zeros((case_number, case_number))
    B_p = np.zeros((n_P,n_P))
    B_pp = np.zeros((n_PQ,n_PQ))

    #-------------------------------------------------
    for i in xrange(case_number):
        Y_p[i][i] = complex(0,0)
        for j in xrange(case_number):
            if i != j:
                Y_p[i][i] -= Y_p[i][j]
    B = np.imag(Y_p)
    for i in xrange(n_P):
        for j in xrange(n_P):
            B_p[i][j] = B[index_P[i]][index_P[j]]
    #-------------------------------------------------
    g_b_round = np.zeros(case_number)
    for i in xrange(case_number):
        a = np.sum(Y[i])
        if LA.norm(a) > 1e-5:
            g_b_round[i] = np.reciprocal(np.imag(np.reciprocal(a)))

    for i in xrange(case_number):
        for j in xrange(case_number):
            if LA.norm(Y[i][j]) > 1e-5 and i!=j:
                X_p[i][j] = np.reciprocal(np.imag(np.reciprocal(Y[i][j])))
    for i in xrange(case_number):
        X_p[i][i] = g_b_round[i]
        for j in xrange(case_number):
            if i != j:
                X_p[i][i] -= X_p[i][j]
    for i in xrange(0, n_PQ):
        for j in xrange(0, n_PQ):
            B_pp[i][j] = X_p[index_PQ[i]][index_PQ[j]]

    return B_p, B_pp
Project: clinspell    Author: clips
def __init__(self, parameters, language):

        assert language in ["en", "nl"]
        self.language = language

        # load frequency list
        pathtofrequencies = 'frequencies_' + language + '.json'
        # load trained fasttext model
        pathtomodel = 'embeddings_' + language + '.bin'
        # give path to fasttext vectors
        pathtovectors = 'embeddings_' + language + '.vec'

        # PHASE 1
        self.comp_function = parameters['comp_function']  # item from ["sum", "mult", "max"]
        self.include_misspelling = parameters['include_misspelling']  # boolean
        self.include_oov_candidates = parameters['include_oov_candidates']  # boolean
        self.pathtovectors = pathtovectors  # path to fasttext vectors
        self.model = fasttext.load_model(pathtomodel)   # path to fasttext model

        # PHASE 2
        self.window_size = parameters['window_size']  # number in range(0,11)
        self.reciprocal = parameters['reciprocal']  # boolean
        self.remove_stopwords = parameters['remove_stopwords']  # boolean
        self.stopwords = frozenset(json.load(open('stopwords_' + str(self.language) + '.json', 'r')))

        # PHASE 3
        self.edit_distance = parameters['edit_distance']  # item from [1, 2, 3, 4]

        # PHASE 4
        self.oov_penalty = parameters['oov_penalty']  # oov penalty tuned with self.tune_oov()

        # OUTPUT
        self.ranking_method = parameters['ranking_method']  # item from ["context", "noisy_channel", "frequency",
        # "ensemble"]
        self.frequency_dict = json.load(open(pathtofrequencies, 'r'))  # path to frequency list
        self.k = parameters['k-best']  # positive natural number
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                assert_almost_equal(np.reciprocal(inp2),
                                    np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                np.add(inp1, 1, out=out)
                assert_almost_equal(out, exp1 + 1, err_msg=msg)
                np.add(1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)
Project: PyRBF    Author: srowe12
def KernelRadial(self, r):
        return np.reciprocal(np.sqrt(1+self.gamma*r))
Project: PyRBF    Author: srowe12
def KernelRadial(self, r):
        return np.reciprocal(1+self.gamma*r)
Project: Steal-ML    Author: ftramer
def predict_probas(X, w, intercept, multinomial=True):
    """
    Predict probabilities for each class using either a multinomial or a
    one-vs-rest approach
    """

    #print X.shape
    #print w.shape
    #print intercept.shape

    p = safe_sparse_dot(X, w.T, dense_output=True) + intercept

    if multinomial:
        return softmax(p, copy=False)
    else:
        p = p.ravel() if p.shape[1] == 1 else p

        p *= -1
        np.exp(p, p)
        p += 1
        np.reciprocal(p, p)

        if p.ndim == 1:
            return np.vstack([1 - p, p]).T
        else:
            # OvR normalization, like LibLinear's predict_probability
            p /= p.sum(axis=1).reshape((p.shape[0], -1))
            return p
Project: aws-lambda-numpy    Author: vitolimandibhrata
def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                assert_almost_equal(np.reciprocal(inp2),
                                    np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                np.add(inp1, 1, out=out)
                assert_almost_equal(out, exp1 + 1, err_msg=msg)
                np.add(1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)
Project: lambda-numba    Author: rlhotovy
def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                assert_almost_equal(np.reciprocal(inp2),
                                    np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                np.add(inp1, 1, out=out)
                assert_almost_equal(out, exp1 + 1, err_msg=msg)
                np.add(1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)
Project: singing_horse    Author: f0k
def sigmoid(x, out):
        if out is not x:
            out[:] = x
        np.negative(out, out)
        np.exp(out, out)
        out += 1
        np.reciprocal(out, out)
        return out
Project: ngram2vec    Author: zhezhaoa
def calc_pmi(counts, cds):
    sum_w = np.array(counts.sum(axis=1))[:, 0]
    sum_c = np.array(counts.sum(axis=0))[0, :]
    if cds != 1:
        sum_c = sum_c ** cds
    sum_total = sum_c.sum()
    sum_w = np.reciprocal(sum_w)
    sum_c = np.reciprocal(sum_c)

    pmi = csr_matrix(counts)
    pmi = multiply_by_rows(pmi, sum_w)
    pmi = multiply_by_columns(pmi, sum_c)
    pmi = pmi * sum_total
    return pmi
Project: ngram2vec    Author: zhezhaoa
def normalize(self):
        m2 = self.m.copy()
        m2.data **= 2
        norm = np.reciprocal(np.sqrt(np.array(m2.sum(axis=1))[:, 0]))
        normalizer = dok_matrix((len(norm), len(norm)))
        normalizer.setdiag(norm)
        self.m = normalizer.tocsr().dot(self.m)
Project: crankshaft    Author: CartoDB
def numerator(center_coords, data_i):
    """

    """
    return np.reciprocal(np.linalg.norm(center_coords - data_i))