Python numpy module: diagflat() example source code

The following 17 code examples, extracted from open-source Python projects, illustrate how to use numpy.diagflat().
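
Before the project examples, a quick standalone sketch of the function itself (behaviour as documented by NumPy): diagflat flattens its input and places it on the k-th diagonal of a new 2-D array.

import numpy as np

v = np.array([1, 2, 3])
print(np.diagflat(v))        # 3x3 array with [1, 2, 3] on the main diagonal
print(np.diagflat(v, k=1))   # 4x4 array with [1, 2, 3] on the first superdiagonal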

Project: ESL-Model    Author: littlezz    | Project source | File source
def train(self):
        X = self.train_x
        y = self.train_y
        # include intercept
        beta = np.zeros((self.p+1, 1))

        iter_times = 0
        while True:
            e_X = np.exp(X @ beta)
            # N x 1
            self.P = e_X / (1 + e_X)
            # W is a vector
            self.W = (self.P * (1 - self.P)).flatten()
            # X.T * W is equivalent to X.T @ np.diagflat(W), without forming the N x N diagonal matrix
            beta = beta + self.math.pinv((X.T * self.W) @ X) @ X.T @ (y - self.P)

            iter_times += 1
            if iter_times > self.max_iter:
                break

        self.beta_hat = beta
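
The comment inside the loop above relies on the fact that multiplying by a diagonal matrix is the same as a broadcast scaling. A minimal check of that equivalence, with made-up dimensions:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))   # N=5 samples, p+1=3 columns
W = rng.random(5)             # one weight per sample

# X.T * W scales column j of X.T by W[j], exactly like X.T @ diagflat(W)
assert np.allclose(X.T * W, X.T @ np.diagflat(W))
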
Project: treecat    Author: posterior    | Project source | File source
def latent_correlation(self):
        """Compute correlation matrix among latent features.

        This computes the generalization of Pearson's correlation to discrete
        data. Let I(X;Y) be the mutual information. Then define correlation as

          rho(X,Y) = sqrt(1 - exp(-2 I(X;Y)))

        Returns:
            A [V, V]-shaped numpy array of feature-feature correlations.
        """
        logger.debug('computing latent correlation')
        V, E, M, R = self._VEMR
        edge_probs = self._edge_probs
        vert_probs = self._vert_probs
        result = np.zeros([V, V], np.float32)
        for root in range(V):
            messages = np.empty([V, M, M])
            program = make_propagation_program(self._tree.tree_grid, root)
            for op, v, v2, e in program:
                if op == OP_ROOT:
                    # Initialize correlation at this node.
                    messages[v, :, :] = np.diagflat(vert_probs[v, :])
                elif op == OP_OUT:
                    # Propagate correlation outward from parent to v.
                    trans = edge_probs[e, :, :]
                    if v > v2:
                        trans = trans.T
                    messages[v, :, :] = np.dot(  #
                        trans / vert_probs[v2, np.newaxis, :],
                        messages[v2, :, :])
            for v in range(V):
                result[root, v] = correlation(messages[v, :, :])
        return result
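
The correlation helper called at the end is not shown here. A plausible sketch, assuming joint is an [M, M] joint probability matrix over two discrete latent variables, would compute the rho defined in the docstring:

import numpy as np

def correlation_sketch(joint):
    # marginals of the two discrete variables
    px = joint.sum(axis=1, keepdims=True)
    py = joint.sum(axis=0, keepdims=True)
    mask = joint > 0                      # skip zero cells to avoid log(0)
    mutual_info = np.sum(joint[mask] * np.log(joint[mask] / (px @ py)[mask]))
    return np.sqrt(1.0 - np.exp(-2.0 * mutual_info))
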
Project: ESL-Model    Author: littlezz    | Project source | File source
def train(self):
        super().train()
        sigma = self.Sigma_hat
        D_, U = LA.eigh(sigma)
        D = np.diagflat(D_)
        self.A = np.power(LA.pinv(D), 0.5) @ U.T
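
The line above builds a whitening transform A = D^{-1/2} U^T from the eigendecomposition of Sigma_hat. A standalone sanity check with a made-up covariance matrix: applying A should turn sigma into the identity.

import numpy as np
from numpy import linalg as LA

rng = np.random.default_rng(0)
M = rng.normal(size=(4, 4))
sigma = M @ M.T + np.eye(4)          # symmetric positive-definite

D_, U = LA.eigh(sigma)
A = np.power(LA.pinv(np.diagflat(D_)), 0.5) @ U.T
assert np.allclose(A @ sigma @ A.T, np.eye(4))
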
Project: ESL-Model    Author: littlezz    | Project source | File source
def train(self):
        super().train()
        W = self.Sigma_hat
        # prior probabilities (K, 1)
        Pi = self.Pi
        # class centroids (K, p)
        Mu = self.Mu
        p = self.p
        # the number of classes
        K = self.n_class
        # the desired (reduced) dimension
        L = self.L

        # Mu is (K, p) matrix, Pi is (K, 1)
        mu = np.sum(Pi * Mu, axis=0)
        B = np.zeros((p, p))

        for k in range(K):
            # vector @ vector gives a scalar; use vec[:, None] to turn the vector into a column matrix
            # vec[:, None] is equivalent to vec.reshape((vec.shape[0], 1))
            B = B + Pi[k]*((Mu[k] - mu)[:, None] @ ((Mu[k] - mu)[None, :]))

        # Note: `eigh` returns the eigenvalues in ascending order, which is the opposite of R.
        Dw, Uw = LA.eigh(W)
        # reverse Dw and the columns of Uw
        Dw = Dw[::-1]
        Uw = np.fliplr(Uw)

        W_half = self.math.pinv(np.diagflat(Dw**0.5) @ Uw.T)
        B_star = W_half.T @ B @ W_half
        D_, V = LA.eigh(B_star)

        # reverse V
        V = np.fliplr(V)

        # overwrite `self.A` so that we can reuse the `predict` method defined in the parent class
        self.A = np.zeros((L, p))
        for l in range(L):
            self.A[l, :] = W_half @ V[:, l]
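
As the ordering comment above warns, numpy's eigh returns eigenvalues in ascending order; reversing the eigenvalues and flipping the eigenvector columns together keeps a valid eigendecomposition with the largest value first. A small self-contained check:

import numpy as np
from numpy import linalg as LA

S = np.array([[2.0, 0.5],
              [0.5, 1.0]])
w, V = LA.eigh(S)                        # ascending eigenvalues
w_desc, V_desc = w[::-1], np.fliplr(V)   # descending, columns flipped to match
assert np.allclose(S @ V_desc, V_desc @ np.diagflat(w_desc))
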
Project: backtrackbb    Author: BackTrackBB    | Project source | File source
def _update_(U, D, d, lambda_):
    """Go from u(n) to u(n+1)."""
    I = np.identity(3)

    m = U.T.dot(d)
    p = (I - U.dot(U.T)).dot(d)
    p_norm = np.linalg.norm(p)

    # Make p and m column vectors
    p = p[np.newaxis].T
    m = m[np.newaxis].T

    U_left = np.hstack((U, p/p_norm))
    Q = np.hstack((lambda_ * D, m))
    Q = np.vstack((Q, [0, 0, p_norm]))

    # SVD
    U_right, D_new, V_left = np.linalg.svd(Q)

    # Discard the smallest singular value
    D_new = D_new[0:2]
    D_new = np.diagflat(D_new)

    U_right = U_right[:, 0:2]

    return U_left.dot(U_right), D_new
Project: backtrackbb    Author: BackTrackBB    | Project source | File source
def rosenberger(dataX, dataY, dataZ, lambda_):
    """
    Separate P and non-P wavefield from 3-component data.

    Return two sets of 3-component traces and the per-sample values of |U[0, 0]|.
    """
    # Construct the data matrix
    A = np.vstack((dataZ, dataX, dataY))

    # SVD of the first 3 samples:
    U, D, V = np.linalg.svd(A[:, 0:3])

    # Discard the smallest singular value
    D = D[0:2]
    D = np.diagflat(D)
    U = U[:, 0:2]

    save_U = np.zeros(len(dataX))
    save_U[0] = abs(U[0, 0])

    Dp = np.zeros((3, len(dataX)))
    Ds = np.zeros((3, len(dataX)))
    Dp[:, 0] = abs(U[0, 0]) * A[:, 2]
    Ds[:, 0] = (1 - abs(U[0, 0])) * A[:, 2]

    # Loop over all the values
    for i in range(1, A.shape[1]):
        d = A[:, i]
        U, D = _update_(U, D, d, lambda_)

        Dp[:, i] = abs(U[0, 0]) * d
        Ds[:, i] = (1-abs(U[0, 0])) * d

        save_U[i] = abs(U[0, 0])

    return Dp, Ds, save_U
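
A hypothetical usage sketch for the two functions above, with synthetic noise standing in for real 3-component traces (all names and sizes below are made up):

import numpy as np

rng = np.random.default_rng(0)
n_samples = 200
dataX = rng.normal(size=n_samples)
dataY = rng.normal(size=n_samples)
dataZ = rng.normal(size=n_samples)

Dp, Ds, u00 = rosenberger(dataX, dataY, dataZ, lambda_=0.99)
print(Dp.shape, Ds.shape, u00.shape)   # (3, 200) (3, 200) (200,)
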
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def eye(n): return diagflat(ones(n))
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def diagflat(a, k=0):
 if isinstance(a, garray): return a.diagflat(k)
 else: return numpy.diagflat(a,k)
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def diagflat(self, k=0):
  if self.ndim!=1: return self.ravel().diagflat(k)
  if k!=0: raise NotImplementedError('k!=0 for garray.diagflat')
  selfSize = self.size
  ret = zeros((selfSize, selfSize))
  ret.ravel()[:-1].reshape((selfSize-1, selfSize+1))[:, 0] = self[:-1]
  if selfSize!=0: ret.ravel()[-1] = self[-1]
  return ret
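
The ravel()/reshape() trick above works on plain numpy arrays as well: viewing the flat buffer of an n x n zero matrix as (n - 1) rows of length n + 1 puts the first column of that view exactly on the diagonal positions 0, n+1, 2(n+1), ...

import numpy as np

vec = np.array([1.0, 2.0, 3.0, 4.0])
n = vec.size
ret = np.zeros((n, n))
ret.ravel()[:-1].reshape((n - 1, n + 1))[:, 0] = vec[:-1]
ret.ravel()[-1] = vec[-1]
assert np.array_equal(ret, np.diagflat(vec))
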
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def diagonal(self):
  if self.ndim==1: return self.diagflat()
  if self.ndim==2:
   if self.shape[0] > self.shape[1]: return self[:self.shape[1]].diagonal()
   if self.shape[1] > self.shape[0]: return self[:, :self.shape[0]].diagonal()
   return self.ravel()[::self.shape[0]+1]
  raise NotImplementedError('garray.diagonal for arrays with ndim other than 1 or 2.')
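
The same stride trick reads a diagonal back out: for a square, C-contiguous array, every (n + 1)-th element of the flat buffer is a diagonal entry.

import numpy as np

A = np.arange(16.0).reshape(4, 4)
assert np.array_equal(A.ravel()[::A.shape[0] + 1], np.diagonal(A))
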
Project: DeepNeuralNet-QSAR    Author: Merck    | Project source | File source
def eye(n): return diagflat(ones(n))
Project: DeepNeuralNet-QSAR    Author: Merck    | Project source | File source
def diagflat(self, k=0):
  if self.ndim!=1: return self.ravel().diagflat(k)
  if k!=0: raise NotImplementedError('k!=0 for garray.diagflat')
  selfSize = self.size
  ret = zeros((selfSize, selfSize))
  ret.ravel()[:-1].reshape((selfSize-1, selfSize+1))[:, 0] = self[:-1]
  if selfSize!=0: ret.ravel()[-1] = self[-1]
  return ret
Project: DeepNeuralNet-QSAR    Author: Merck    | Project source | File source
def diagonal(self):
  if self.ndim==1: return self.diagflat()
  if self.ndim==2:
   if self.shape[0] > self.shape[1]: return self[:self.shape[1]].diagonal()
   if self.shape[1] > self.shape[0]: return self[:, :self.shape[0]].diagonal()
   return self.ravel()[::self.shape[0]+1]
  raise NotImplementedError('garray.diagonal for arrays with ndim other than 1 or 2.')
Project: alchemlyb    Author: alchemistry    | Project source | File source
def fit(self, dHdl):
        """
        Compute free energy differences between each state by integrating
        dHdl across lambda values.

        Parameters
        ----------
        dHdl : DataFrame 
            dHdl[n,k] is the potential energy gradient with respect to lambda
            for each configuration n and lambda k.

        """

        # sort by state so that rows from same state are in contiguous blocks,
        # and adjacent states are next to each other
        dHdl = dHdl.sort_index(level=dHdl.index.names[1:])

        # obtain the mean and variance of the mean for each state
        # variance calculation assumes no correlation between points
        # used to calculate mean
        means = dHdl.mean(level=dHdl.index.names[1:])
        variances = np.square(dHdl.sem(level=dHdl.index.names[1:]))

        # obtain vector of delta lambdas between each state
        dl = means.reset_index()[means.index.names[:]].diff().iloc[1:].values

        # apply trapezoid rule to obtain DF between each adjacent state
        deltas = (dl * (means.iloc[:-1].values + means.iloc[1:].values)/2).sum(axis=1)
        d_deltas = (dl**2 * (variances.iloc[:-1].values + variances.iloc[1:].values)/4).sum(axis=1)

        # build matrix of deltas between each state
        adelta = np.zeros((len(deltas)+1, len(deltas)+1))
        ad_delta = np.zeros_like(adelta)

        for j in range(len(deltas)):
            out = []
            dout = []
            for i in range(len(deltas) - j):
                out.append(deltas[i] + deltas[i+1:i+j+1].sum())
                dout.append(d_deltas[i] + d_deltas[i+1:i+j+1].sum())

            adelta += np.diagflat(np.array(out), k=j+1)
            ad_delta += np.diagflat(np.array(dout), k=j+1)

        # yield standard delta_f_ free energies between each state
        self.delta_f_ = pd.DataFrame(adelta - adelta.T,
                                     columns=means.index.values,
                                     index=means.index.values)

        # yield standard deviation d_delta_f_ between each state
        self.d_delta_f_ = pd.DataFrame(np.sqrt(ad_delta + ad_delta.T),
                                       columns=variances.index.values,
                                       index=variances.index.values)

        self.states_ = means.index.values.tolist()

        return self
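
A toy sketch of how the diagflat(..., k=j+1) calls above assemble the matrix of accumulated differences: pass j fills the (j+1)-th superdiagonal with sums over j+1 adjacent intervals (the numbers below are made up):

import numpy as np

deltas = np.array([0.5, 0.2, 0.3])       # per-interval free energy changes
n = len(deltas) + 1
adelta = np.zeros((n, n))
for j in range(len(deltas)):
    out = [deltas[i:i + j + 1].sum() for i in range(len(deltas) - j)]
    adelta += np.diagflat(np.array(out), k=j + 1)

# adelta[i, k] now holds the accumulated change from state i to state k (i < k)
print(adelta - adelta.T)                  # antisymmetrized, as in delta_f_ above
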
Project: prep    Author: ysyushi    | Project source | File source
def simplex_project(y, infinitesimal):
    # 1-D vector version
    # D = len(y)
    # u = np.sort(y)[::-1]
    # x_tmp = (1. - np.cumsum(u)) / np.arange(1, D+1)
    # lmd = x_tmp[np.sum(u + x_tmp > 0) - 1]
    # return np.maximum(y + lmd, 0)

    n, d = y.shape
    x = np.fliplr(np.sort(y, axis=1))
    x_tmp = np.dot((np.cumsum(x, axis=1) + (d * infinitesimal - 1.)), np.diagflat(1. / np.arange(1, d + 1)))
    lmd = x_tmp[np.arange(n), np.sum(x > x_tmp, axis=1) - 1]
    return np.maximum(y - lmd[:, np.newaxis], 0) + infinitesimal
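
A hypothetical check of simplex_project: every row of the result should be (almost) a probability vector, i.e. strictly positive and summing to roughly 1 once the infinitesimal padding is added back.

import numpy as np

rng = np.random.default_rng(0)
Y = rng.normal(size=(5, 4))
P = simplex_project(Y, infinitesimal=1e-10)

assert np.all(P > 0)
print(P.sum(axis=1))    # each entry should be very close to 1
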
Project: linear-algebra-methods    Author: angellicacardozo    | Project source | File source
# Jacobi iteration for Ax = b (assumes a module-level `from numpy import *`
# and `from pprint import pprint`, which the bare names below rely on).
def JCB(A, b, N=25, x=None):

    if x is None:
        # start from the zero vector when no initial guess is given
        x = zeros(len(A[0]))

    # split A into its diagonal D and the remainder R
    D = diag(A)
    R = A - diagflat(D)

    # Jacobi update: x_{k+1} = D^{-1} (b - R @ x_k)
    for i in range(N):
        x = (b - dot(R, x)) / D

    pprint(x)
    return x
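
A usage sketch for JCB on a small, diagonally dominant system; the Jacobi result should agree with a direct solve. The star-import and pprint import below supply the bare names the snippet above relies on.

from numpy import *            # provides zeros, diag, diagflat, dot, array, allclose
from numpy.linalg import solve
from pprint import pprint      # JCB calls pprint on the result

A = array([[4.0, 1.0],
           [2.0, 5.0]])
b = array([1.0, 2.0])

x = JCB(A, b, N=50)
assert allclose(x, solve(A, b), atol=1e-6)
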
Project: DeepNeuralNet-QSAR    Author: Merck    | Project source | File source
def diagflat(a, k=0):
 if isinstance(a, garray): return a.diagflat(k)
 else: return numpy.diagflat(a,k)