The following 10 code examples, extracted from open-source Python projects, illustrate how to use scipy.sparse.linalg.eigs().
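Before the project code, here is a minimal self-contained sketch of the basic call (the matrix size, density, and k are arbitrary illustrative choices): eigs returns a tuple of k eigenvalues and a matrix whose columns are the corresponding eigenvectors.

import scipy.sparse as sparse
from scipy.sparse.linalg import eigs

# A small random sparse matrix, purely for illustration.
A = sparse.random(100, 100, density=0.05, format='csr', random_state=0)

# Six eigenvalues of largest magnitude (the defaults: k=6, which='LM').
# vals has shape (6,); vecs has shape (100, 6), one column per eigenvalue.
vals, vecs = eigs(A, k=6, which='LM')
print(vals)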
from cvxopt import matrix, spmatrix, mul
from cvxopt.lapack import gesv


def part_factor(As):
    """Compute participation factors of states in eigenvalues"""
    # `eigs` is assumed to be the project's own helper (shown in the last
    # example below), returning (eigenvalues, right eigenvector matrix).
    mu, N = eigs(As)
    N = matrix(N)
    n = len(mu)
    idx = range(n)
    W = matrix(spmatrix(1.0, idx, idx, (n, n), N.typecode))
    gesv(N, W)  # solves N*X = I in place, so W becomes inv(N)
    # elementwise |left eigenvectors| * |right eigenvectors|
    partfact = mul(abs(W.T), abs(N))
    b = matrix(1.0, (1, n))
    WN = b * partfact
    partfact = partfact.T
    for item in idx:
        mu_real = mu[item].real
        mu_imag = mu[item].imag
        mu[item] = complex(round(mu_real, 4), round(mu_imag, 4))
        partfact[item, :] /= WN[item]

    # participation factor
    return matrix(mu), matrix(partfact)
import numpy as np
from scipy.sparse.linalg import eigs


def rv_pca(data, n_datasets):
    """
    Get weights for tables by calculating their similarity.

    :return:
    """
    print("Rv-PCA")
    C = np.zeros([n_datasets, n_datasets])
    print("Rv-PCA: Hilbert-Schmidt inner products... ", end='')
    for i in range(n_datasets):
        for j in range(n_datasets):
            C[i, j] = np.sum(data[i].affinity_ * data[j].affinity_)
    print('Done!')
    print("Rv-PCA: Decomposing the inner product matrix... ", end='')
    e, u = eigs(C, k=8)
    print("Done!")
    # Table weights are the normalised first eigenvector of C.
    weights = (np.real(u[:, 0]) / np.sum(np.real(u[:, 0]))).flatten()
    print("Rv-PCA: Done!")
    return weights, np.real(e), np.real(u)
import numpy as np
from scipy.sparse.linalg import eigs


def aniso_c1(X, M):
    """
    Calculate weights using ANISOSTATIS criterion 1.

    :param X:
    :param M:
    :return: New weights
    """
    print("Computing ANISOSTATIS Criterion 1 weights...", end='')
    C = np.power(X.T.dot(M.dot(X)), 2)
    ev, Mn = eigs(C, k=1)
    Mn = np.real(Mn)
    Mn = np.concatenate(Mn / np.sum(Mn))
    print('Done!')
    return Mn, ev
from time import time

import networkx as nx
import numpy as np
import scipy.sparse.linalg as lg


def learn_embedding(self, graph=None, edge_f=None,
                    is_weighted=False, no_python=False):
    if not graph and not edge_f:
        raise Exception('graph/edge_f needed')
    if not graph:
        # graph_util is the project's own graph I/O helper module.
        graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
    graph = graph.to_undirected()
    t1 = time()
    L_sym = nx.normalized_laplacian_matrix(graph)
    # Smallest d+1 eigenpairs of the normalized Laplacian; the trivial
    # first eigenvector is dropped from the embedding below.
    w, v = lg.eigs(L_sym, k=self._d + 1, which='SM')
    t2 = time()
    self._X = v[:, 1:]

    p_d_p_t = np.dot(v, np.dot(np.diag(w), v.T))
    eig_err = np.linalg.norm(p_d_p_t - L_sym)
    print('Laplacian matrix recon. error (low rank): %f' % eig_err)
    return self._X, (t2 - t1)
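The example above asks ARPACK directly for the smallest-magnitude eigenvalues (which='SM'), which is known to converge slowly. A minimal stand-alone sketch of the usual alternative, shift-invert mode; the graph and the sigma value are illustrative choices, not part of the original project:

import networkx as nx
import numpy as np
import scipy.sparse.linalg as lg

graph = nx.karate_club_graph()
L_sym = nx.normalized_laplacian_matrix(graph).astype(float)

# Shift-invert: with sigma set, which='LM' returns the eigenvalues closest
# to sigma. A small negative shift keeps (L - sigma*I) nonsingular while
# still targeting the smallest eigenvalues of the PSD Laplacian.
d = 2
w, v = lg.eigs(L_sym, k=d + 1, sigma=-1e-3, which='LM')
X = np.real(v[:, 1:])  # drop the trivial eigenvector
print(X.shape)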
import numpy as np
from scipy.sparse.linalg import eigs


def fit(self, X, Y):
    # Cap N at n-2 so ARPACK's requirement k < n-1 always holds.
    self.N = min(self.N, X.shape[1] - 2)
    y_max = int(np.max(Y) + 1)
    self.W = np.zeros((X.shape[1], self.N * y_max * (y_max - 1)),
                      dtype=X.dtype)
    off = 0
    for i in range(y_max):
        Xi = X[Y == i]
        covi = np.dot(Xi.T, Xi)
        covi /= np.float32(Xi.shape[0])
        for j in range(y_max):
            if j == i:
                continue
            if self.verbose:
                print("Finding eigenvectors for pair ({}/{})".format(i, j))
            Xj = X[Y == j]
            covj = np.dot(Xj.T, Xj) / np.float32(Xj.shape[0])
            # Whiten with the (preconditioned) Cholesky factor of covj.
            E = np.linalg.pinv(np.linalg.cholesky(
                covj + np.eye(covj.shape[0]) * self.precond).T)
            C = np.dot(np.dot(E.T, covi), E)
            C2 = 0.5 * (C + C.T)  # symmetrize against round-off
            S, U = eigs(C2, self.N)
            gev = np.dot(E, U[:, :self.N])  # map back to the original basis
            self.W[:, off:off + self.N] = np.real(gev)
            off += self.N
    if self.verbose:
        print("DONE")
    return self
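A hedged aside: the whitening trick above (Cholesky of covj, then an ordinary eigendecomposition of E.T @ covi @ E) solves the generalized eigenproblem covi @ w = lambda * covj @ w. With dense SciPy the same problem is a one-liner; the data and sizes below are arbitrary:

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
Xi = rng.standard_normal((50, 5))
Xj = rng.standard_normal((60, 5))
covi = Xi.T @ Xi / len(Xi)
covj = Xj.T @ Xj / len(Xj) + 1e-6 * np.eye(5)  # same preconditioning idea

# Generalized symmetric eigenproblem; eigenvalues come back ascending.
lam, W = scipy.linalg.eigh(covi, covj)
print(lam[-2:])  # the two largest generalized eigenvalues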
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as lin


def IntWeights(N, M, connectivity):
    succ = False
    while not succ:
        try:
            W_raw = sparse.rand(N, M, format='lil', density=connectivity)
            rows, cols = W_raw.nonzero()
            for row, col in zip(rows, cols):
                W_raw[row, col] = np.random.randn()
            # The original unpacked np.abs(lin.eigs(W_raw, 1)), relying on
            # NumPy mapping abs over the (values, vectors) tuple; newer
            # NumPy rejects that, so take the leading eigenvalue explicitly.
            vals = lin.eigs(W_raw, k=1, which='LM',
                            return_eigenvectors=False)
            specRad = np.abs(vals[0])
            W_raw = np.squeeze(np.asarray(W_raw / specRad))
            succ = True
            return W_raw
        except Exception:
            # ARPACK occasionally fails to converge; retry with a new draw.
            pass
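The same initialisation can be written more directly against the current SciPy API: draw the nonzeros from a normal distribution up front instead of overwriting them one by one, then rescale by the spectral radius. A sketch; the size and density are arbitrary, and a production version would keep a retry guard like the one above:

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as lin

N, connectivity = 200, 0.05
W = sparse.random(N, N, density=connectivity, format='csr',
                  data_rvs=np.random.randn)

# Spectral radius = magnitude of the largest-magnitude eigenvalue.
vals = lin.eigs(W, k=1, which='LM', return_eigenvectors=False)
spec_rad = np.abs(vals[0])

W_unit = (W / spec_rad).toarray()  # dense weights with unit spectral radius
print(W_unit.shape)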
import numpy as np
from scipy.sparse.linalg import eigs


def get_eigen_vector(self):
    # Dense alternatives, kept commented out for reference:
    # w, v = np.linalg.eig(self.M)
    # max_ind = np.where(w == max(w))[0][0]
    # self.w_inf = v[:, max_ind]
    # rearrangedEvalsVecs = sorted(zip(evals, evecs.T),
    #                              key=lambda x: x[0].real, reverse=True)

    # Leading eigenvector of M.T, i.e. the left eigenvector of M.
    self.w_inf = eigs(self.M.T, 1)[1].flatten()
    # Normalize the entries so they sum to 1:
    self.w_inf = self.w_inf / np.sum(self.w_inf)
    print(self.w_inf[:5])
from scipy import sparse
from scipy.sparse import linalg


def compute_eigenvectors(laplacian):
    # csr_matrix is SciPy's compressed sparse row format
    laplacian_sparse = sparse.csr_matrix(laplacian)
    # linalg is the linear algebra module in scipy.sparse; eigs takes a
    # matrix and returns (array of eigenvalues, array of eigenvectors)
    return linalg.eigs(laplacian_sparse)
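A hypothetical usage of the wrapper above, on the Laplacian of a 10-node path graph built inline. Note that with no k argument eigs defaults to k=6, so the input needs more than 7 rows (ARPACK requires k < n - 1):

import numpy as np

n = 10
degrees = 2.0 * np.ones(n)
degrees[0] = degrees[-1] = 1.0  # endpoints of the path have degree 1
# Laplacian L = D - A for a path: degrees on the diagonal, -1 off-diagonal.
laplacian = np.diag(degrees) - np.eye(n, k=1) - np.eye(n, k=-1)

vals, vecs = compute_eigenvectors(laplacian)
print(np.real(vals))  # the six eigenvalues of largest magnitude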
import numpy
import scipy.sparse
# The unreachable SLEPc branch below would additionally need:
#   from cvxopt import sparse   (assumed: cvxopt's sparse constructor)
#   from petsc4py import PETSc
#   from slepc4py import SLEPc


def eigs(As):
    """Solve eigenvalues from state matrix"""
    # if (not SLEPC) or (not SLEPC):
    return numpy.linalg.eig(As)
    # NOTE: the early return above short-circuits the SLEPc-based path
    # below, which is kept for reference but never executed.
    # try:
    As = sparse(As)
    As = scipy.sparse.csc_matrix((numpy.array(As.CCS[2]).flatten(),
                                  numpy.array(As.CCS[1]).flatten(),
                                  numpy.array(As.CCS[0]).flatten()),
                                 shape=As.size)
    # return linalg.eigs(As, k=1326)
    As_csr = As.tocsr()

    opts = PETSc.Options()

    A = PETSc.Mat().createAIJ(size=As_csr.shape,
                              csr=(As_csr.indptr, As_csr.indices,
                                   As_csr.data))
    A.setFromOptions()
    A.setUp()

    xr, tmp = A.getVecs()
    xi, tmp = A.getVecs()

    # Set up the eigensolver
    E = SLEPc.EPS().create()
    E.setOperators(A, None)
    E.setDimensions(500, PETSc.DECIDE)
    E.setProblemType(SLEPc.EPS.ProblemType.GNHEP)
    E.setFromOptions()

    # Solve the eigensystem
    E.solve()

    Print = PETSc.Sys.Print
    Print("")
    its = E.getIterationNumber()
    Print("Number of iterations of the method: %i" % its)
    sol_type = E.getType()
    Print("Solution method: %s" % sol_type)
    nev, ncv, mpd = E.getDimensions()
    Print("Number of requested eigenvalues: %i" % nev)
    tol, maxit = E.getTolerances()
    Print("Stopping condition: tol=%.4g, maxit=%d" % (tol, maxit))
    nconv = E.getConverged()
    Print("Number of converged eigenpairs: %d" % nconv)

    if nconv > 0:
        Print("")
        Print("        k          ||Ax-kx||/||kx||")
        Print("----------------- ------------------")
        for i in range(nconv):
            k = E.getEigenpair(i, xr, xi)
            error = E.computeError(i)
            if k.imag != 0.0:
                Print(" %9f%+9f j %12g" % (k.real, k.imag, error))
            else:
                Print(" %12f      %12g" % (k.real, error))
        Print("")
    # except:
    #     return numpy.linalg.eigvals(As)
import numpy as np
from scipy.sparse.linalg import eigs


def eigenMethod(self, tol=1e-8, maxiter=1e5):
    """
    Determines ``pi`` by searching for the eigenvector corresponding to
    the first eigenvalue, using the :func:`eigs` function. The result is
    stored in the class attribute ``pi``.

    Parameters
    ----------
    tol : float, optional(default=1e-8)
        Tolerance level for the precision of the end result. A lower
        tolerance leads to a more accurate estimate of ``pi``.
    maxiter : int, optional(default=1e5)
        The maximum number of iterations to be carried out.

    Example
    -------
    >>> P = np.array([[0.5,0.5],[0.6,0.4]])
    >>> mc = markovChain(P)
    >>> mc.eigenMethod()
    >>> print(mc.pi)
    [ 0.54545455  0.45454545]

    Remarks
    -------
    The speed of convergence depends heavily on the choice of the initial
    guess for ``pi``. Here we let the initial ``pi`` be a vector of ones.
    For large state spaces, this method may not work well. At the moment,
    we call :func:`powerMethod` if the number of states is 2.
    Code is due to a colleague:
    http://nicky.vanforeest.com/probability/markovChains/markovChain.html
    """
    Q = self.getIrreducibleTransitionMatrix(probabilities=False)

    # Solve the one- and two-state cases directly.
    if Q.shape == (1, 1):
        self.pi = np.array([1.0])
        return
    if Q.shape == (2, 2):
        self.pi = np.array([Q[1, 0], Q[0, 1]] / (Q[0, 1] + Q[1, 0]))
        return

    size = Q.shape[0]
    guess = np.ones(size, dtype=float)
    # Shift-invert around zero: the stationary vector is the eigenvector
    # of Q.T for the eigenvalue closest to 0.
    w, v = eigs(Q.T, k=1, v0=guess, sigma=1e-6, which='LM',
                tol=tol, maxiter=maxiter)
    pi = v[:, 0].real
    pi /= pi.sum()
    self.pi = pi
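A stand-alone sketch of the same idea for a row-stochastic matrix P (the 3x3 matrix is an arbitrary example; the method above instead works on a rate matrix Q and uses shift-invert near zero). For a stochastic matrix the stationary distribution is the left eigenvector for eigenvalue 1, which is also the eigenvalue of largest magnitude, so plain which='LM' suffices:

import numpy as np
from scipy.sparse.linalg import eigs

P = np.array([[0.7, 0.2, 0.1],
              [0.3, 0.5, 0.2],
              [0.2, 0.3, 0.5]])

# Left eigenvector of P = eigenvector of P.T; k=1 needs n >= 3 since
# ARPACK requires k < n - 1.
w, v = eigs(P.T, k=1, which='LM')
pi = np.real(v[:, 0])
pi /= pi.sum()
print(pi)      # stationary distribution
print(pi @ P)  # equals pi up to numerical error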