Python networkx module: from_numpy_matrix() code examples

The following 35 code examples, extracted from open-source Python projects, illustrate how to use networkx.from_numpy_matrix().
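from_numpy_matrix() interprets an N x N matrix as the adjacency matrix of an N-node graph: the nodes are the row/column indices 0..N-1, and every nonzero entry becomes an edge whose weight attribute is that entry's value. (The function was removed in networkx 3.0, where nx.from_numpy_array() is the replacement.) A minimal sketch with a toy matrix, independent of the projects below:

import numpy as np
import networkx as nx

# A small symmetric matrix: nonzero entries become weighted edges.
A = np.array([[0, 1, 0],
              [1, 0, 2],
              [0, 2, 0]])

G = nx.from_numpy_matrix(A)
print(G.edges(data=True))  # [(0, 1, {'weight': 1}), (1, 2, {'weight': 2})]

# Passing create_using yields a directed graph instead:
D = nx.from_numpy_matrix(A, create_using=nx.DiGraph())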

Project: TextRankPlus | Author: zuoxiaolei
def sort_sentences(sentences, words,model, pagerank_config = {'alpha': 0.85,}):
    """???????????????

    Keyword arguments:
    sentences         --  ????????
    words             --  ?????????sentences???????????????
    sim_func          --  ????????????????????????
    pagerank_config   --  pagerank???
    """
    sorted_sentences = []
    _source = words
    sentences_num = len(_source)
    graph = np.zeros((sentences_num, sentences_num))

    for x in xrange(sentences_num):
        for y in xrange(x, sentences_num):
            similarity = get_similarity( _source[x], _source[y], model)
            graph[x, y] = similarity
            graph[y, x] = similarity
    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)              # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)

    for index, score in sorted_scores:
        item = AttrDict(index=index, sentence=sentences[index], weight=score)
        sorted_sentences.append(item)

    return sorted_sentences
Project: TextRankPlus | Author: zuoxiaolei
def sort_sentences(sentences, words, sim_func = get_similarity, pagerank_config = {'alpha': 0.85,}):
    """???????????????

    Keyword arguments:
    sentences         --  ????????
    words             --  ?????????sentences???????????????
    sim_func          --  ????????????????????????
    pagerank_config   --  pagerank???
    """
    sorted_sentences = []
    _source = words
    sentences_num = len(_source)
    graph = np.zeros((sentences_num, sentences_num))

    for x in xrange(sentences_num):
        for y in xrange(x, sentences_num):
            similarity = sim_func( _source[x], _source[y] )
            graph[x, y] = similarity
            graph[y, x] = similarity

    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)              # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)

    for index, score in sorted_scores:
        item = AttrDict(index=index, sentence=sentences[index], weight=score)
        sorted_sentences.append(item)

    return sorted_sentences
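Both variants above share one core idea: build a symmetric sentence-similarity matrix, convert it with from_numpy_matrix(), and rank the nodes with PageRank. A self-contained miniature of that pipeline, using a toy word-overlap similarity as a stand-in for the project's get_similarity/model (and range, since the listings above target Python 2):

import numpy as np
import networkx as nx

sentences = ["the cat sat", "the cat ran", "dogs bark loudly"]
words = [s.split() for s in sentences]

def overlap_sim(a, b):
    # toy stand-in for the project's similarity function
    sa, sb = set(a), set(b)
    return len(sa & sb) / float(len(sa | sb))

n = len(words)
graph = np.zeros((n, n))
for x in range(n):
    for y in range(x + 1, n):
        graph[x, y] = graph[y, x] = overlap_sim(words[x], words[y])

scores = nx.pagerank(nx.from_numpy_matrix(graph), alpha=0.85)
for idx in sorted(scores, key=scores.get, reverse=True):
    print(sentences[idx], round(scores[idx], 3))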
Project: pyconnectome | Author: neurospin
def create_graph(connectome, labels):
    """ Create a graph structure from the connectome matrix.

    Parameters
    ----------
    connectome: array (N, N)
        a matrix representing the structural connections.
    labels: list of str (N,)
        the labels used to create the connectome matrix.

    Returns
    -------
    graph: Graph
        a graph structure.
    """
    graph = nx.from_numpy_matrix(connectome)
    for index, name in enumerate(labels):
        name = name.rstrip("\n")
        graph.node[index] = {"label": name}
    return graph
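A toy invocation (hypothetical two-region connectome, not project data); note that graph.node[...] is the networkx 1.x API, so under 2.x the function body would need graph.nodes[index]:

import numpy as np

connectome = np.array([[0.0, 0.8],
                       [0.8, 0.0]])
labels = ["region_A\n", "region_B\n"]  # trailing newlines are stripped
g = create_graph(connectome, labels)
print(g.node[0]["label"])  # region_A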
Project: graphpca | Author: brandones
def test_similar_output_to_naive_mat_3(self):
        mat = scipy.io.loadmat('bcspwr01.mat')
        # I love the UFSMC (https://www.cise.ufl.edu/research/sparse/matrices/)
        # but wow they really buried the matrix in this .mat
        A = mat['Problem'][0][0][1].todense()
        G = nx.from_numpy_matrix(A)
        G3 = graphpca.reduce_graph_efficiently(G, 3)
        G3n = graphpca.reduce_graph_naively(G, 3)
        self.assertTrue(np.allclose(G3, G3n, rtol=1e-04, atol=1e-06),
                        'Regular result:\n{}\nNaive result:\n{}\n'.format(G3, G3n))
Project: graphpca | Author: brandones
def test_add_supernode_similar_output_to_naive_mat_3(self):
        mat = scipy.io.loadmat('bcspwr01.mat')
        A = mat['Problem'][0][0][1].todense()
        G = nx.from_numpy_matrix(A)
        G3 = graphpca.reduce_graph_efficiently(G, 3, add_supernode=True)
        G3n = graphpca.reduce_graph_naively(G, 3)
        self.assertTrue(np.allclose(G3, G3n, rtol=1e-02, atol=1e-06),
                        'Regular result:\n{}\nNaive result:\n{}\n'.format(G3, G3n))
Project: GVIN | Author: sufengniu
def astar_len(graph, width, height, startx, starty, targetx, targety):
    adj = adjecent_2DGridWorld(graph, width, height)
    G = nx.from_numpy_matrix(adj)
    return nx.astar_path_length(G, starty*width+startx, targety*width+targetx)
Project: GVIN | Author: sufengniu
def nx_plot(adj, pos, value):
    # input: adjacency matrix, positions, value map
    label = np.arange(len(pos))
    G=nx.from_numpy_matrix(adj)

    nx.draw_networkx_nodes(G, pos, node_color = value)
    nx.draw_networkx_labels(G, pos)
    nx.draw_networkx_edges(G, pos)
    plt.ion()
    plt.show()
Project: GVIN | Author: sufengniu
def astar_len(adj, start, target):
    G = nx.from_numpy_matrix(adj)
    return nx.astar_path_length(G, start, target)
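A quick sanity check of this variant on a hand-built 3-node path graph (toy data): every nonzero matrix entry becomes an edge of weight 1 here, so the A* length equals the hop count.

import numpy as np

adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])
print(astar_len(adj, 0, 2))  # 2, via the path 0 -> 1 -> 2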

# Data
Project: GVIN | Author: sufengniu
def nx_plot(adj, pos, value):
    # input: adjacency matrix, positions, value map
    label = np.arange(len(pos))
    G=nx.from_numpy_matrix(adj)

    nx.draw_networkx_nodes(G, pos, node_color = value)
    nx.draw_networkx_labels(G, pos)
    nx.draw_networkx_edges(G, pos, width=1.0)
    plt.ion()
    plt.show()
Project: GVIN | Author: sufengniu
def nx_plot(adj, pos, value):
    # input: adjacency matrix, positions, value map
    label = np.arange(len(pos))
    G=nx.from_numpy_matrix(adj)

    nodes = nx.draw_networkx_nodes(G, pos, node_color=value, node_size=200)
    nodes.set_edgecolor('black')
    nx.draw_networkx_labels(G, pos, font_size=10)
    nx.draw_networkx_edges(G, pos, width=1.0)
    plt.ion()
    plt.show()
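A minimal call for the nx_plot variants above (toy two-node graph with hand-picked positions; assumes the host module's numpy/networkx/matplotlib imports):

import numpy as np

adj = np.array([[0, 1],
                [1, 0]])
pos = {0: (0.0, 0.0), 1: (1.0, 0.0)}
value = [0.2, 0.8]  # per-node color values
nx_plot(adj, pos, value)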
Project: JustCopy | Author: exe1023
def sort_sentences(sentences, words, sim_func = get_similarity, pagerank_config = {'alpha': 0.85,}):
    """???????????????

    Keyword arguments:
    sentences         --  ????????
    words             --  ?????????sentences???????????????
    sim_func          --  ????????????????????????
    pagerank_config   --  pagerank???
    """
    sorted_sentences = []
    _source = words
    sentences_num = len(_source)        
    graph = np.zeros((sentences_num, sentences_num))

    for x in xrange(sentences_num):
        for y in xrange(x, sentences_num):
            similarity = sim_func( _source[x], _source[y] )
            graph[x, y] = similarity
            graph[y, x] = similarity

    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)              # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)

    for index, score in sorted_scores:
        item = AttrDict(index=index, sentence=sentences[index], weight=score)
        sorted_sentences.append(item)

    return sorted_sentences
Project: gym-kidney | Author: camoy
def _load_data(self):
        # adjacency matrix
        adj = np.loadtxt(self.data, delimiter = ",")
        self._ref = nx.DiGraph()
        self._ref = nx.from_numpy_matrix(adj, create_using = self._ref)

        # vertex attributes
        with open(self.details, mode = "r") as handle:
            read = csv.reader(handle)
            for row in read:
                u = self._ref.node[int(row[0])]
                u["ndd"] = row[1] == "1"
                u["bp"] = row[2]
                u["bd"] = row[3]
Project: gym-kidney | Author: camoy
def _load_data(self):
        # adjacency matrix
        adj = np.loadtxt(self.data, delimiter = ",")
        self._ref = nx.DiGraph()
        self._ref = nx.from_numpy_matrix(adj, create_using = self._ref)

        # vertex attributes
        with open(self.details, mode = "r") as handle:
            read = csv.reader(handle)
            for row in read:
                u = self._ref.node[int(row[0])]
                u["ndd"] = row[1] == "1"
                u["bp"] = row[2]
                u["bd"] = row[3]
Project: gym-kidney | Author: camoy
def _load_data(self):
        # adjacency matrix
        adj = np.loadtxt(self.data, delimiter = ",")
        self._ref = nx.DiGraph()
        self._ref = nx.from_numpy_matrix(adj, create_using = self._ref)

        # vertex attributes
        with open(self.details, mode = "r") as handle:
            read = csv.reader(handle)
            for row in read:
                u = self._ref.node[int(row[0])]
                u["ndd"] = row[1] == "1"
                u["bp"] = row[2]
                u["bd"] = row[3]
Project: BrainModulyzer | Author: sugeerth
def __init__(self,data):
        super(GraphVisualization, self).__init__()
        self.data = data
        self.G = nx.from_numpy_matrix(self.data)  
        self.DrawHighlightedGraph()
Project: BrainModulyzer | Author: sugeerth
def Find_HighlightedEdges(self,weight = -0.54):
        self.ThresholdData = np.copy(self.data)
        low_values_indices = self.ThresholdData < weight  # Where values are low
        self.ThresholdData[low_values_indices] = 0
        self.g = nx.from_numpy_matrix(self.ThresholdData)
Project: ECoG-ClusterFlow | Author: sugeerth
def __init__(self,data):
        super(GraphVisualization, self).__init__()
        self.data = data
        self.G = nx.from_numpy_matrix(self.data)  
        self.DrawHighlightedGraph()
Project: ECoG-ClusterFlow | Author: sugeerth
def setG(self):
        self.G = nx.from_numpy_matrix(self.data)
Project: ECoG-ClusterFlow | Author: sugeerth
def Find_HighlightedEdges(self,weight = 0):
        self.ThresholdData = np.copy(self.data)
        # low_values_indices = self.ThresholdData < weight  # Where values are low
        # self.ThresholdData[low_values_indices] = 0
    # graterindices = [ (i,j) for i,j in np.ndenumerate(self.ThresholdData) if any(i > j) ] 
        # self.ThresholdData[graterindices[:1]] = 0
        # self.ThresholdData = np.tril(self.ThresholdData)
        # print self.ThresholdData, "is the data same??" 
        """
        test 2 highlighted edges there
        """
        # np.savetxt('test2.txt', self.ThresholdData, delimiter=',', fmt='%1.4e')
        self.g = nx.from_numpy_matrix(self.ThresholdData)
Project: ECoG-ClusterFlow | Author: sugeerth
def returnThresholdedDataValues(data, weight = 0):
    ThresholdData = np.copy(data)
    low_values_indices = ThresholdData < weight  # Where values are low
    ThresholdData[low_values_indices] = 0
    return nx.from_numpy_matrix(ThresholdData)
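For example (toy correlation matrix, not project data): entries below the threshold are zeroed, and since from_numpy_matrix() skips zero entries, only the surviving correlations become edges.

import numpy as np

data = np.array([[0.0, 0.9, 0.1],
                 [0.9, 0.0, 0.4],
                 [0.1, 0.4, 0.0]])
g = returnThresholdedDataValues(data, weight=0.3)
print(sorted(g.edges()))  # [(0, 1), (1, 2)] -- the 0.1 entries were dropped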
Project: AIZooService | Author: zhanglbjames
def sort_sentences(sentences, words, sim_func = get_similarity, pagerank_config = {'alpha': 0.85,}):
    """???????????????

    Keyword arguments:
    sentences         --  ????????
    words             --  ?????????sentences???????????????
    sim_func          --  ????????????????????????
    pagerank_config   --  pagerank???
    """
    sorted_sentences = []
    _source = words
    sentences_num = len(_source)        
    graph = np.zeros((sentences_num, sentences_num))

    for x in xrange(sentences_num):
        for y in xrange(x, sentences_num):
            similarity = sim_func( _source[x], _source[y] )
            graph[x, y] = similarity
            graph[y, x] = similarity

    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)              # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)

    for index, score in sorted_scores:
        item = AttrDict(index=index, sentence=sentences[index], weight=score)
        sorted_sentences.append(item)

    return sorted_sentences
Project: pymake | Author: dtrckd
def nxG(y):
    if type(y) is np.ndarray:
        if (y == y.T).all():
            # Undirected Graph
            typeG = nx.Graph()
        else:
            # Directed Graph
            typeG = nx.DiGraph()
        G = nx.from_numpy_matrix(y, create_using=typeG)
    else:
        G = y
    return G
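The symmetry test drives the graph type: a symmetric matrix yields an undirected Graph, anything else a DiGraph. A toy check:

import numpy as np

sym = np.array([[0, 1],
                [1, 0]])
asym = np.array([[0, 1],
                 [0, 0]])
print(type(nxG(sym)).__name__)   # Graph
print(type(nxG(asym)).__name__)  # DiGraph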


# Global settings
Project: pymake | Author: dtrckd
def getG(self):
        if not hasattr(self, 'G'):
            if self.is_symmetric():
                # Undirected Graph
                typeG = nx.Graph()
            else:
                # Directed Graph
                typeG = nx.DiGraph()
            self.G = nx.from_numpy_matrix(self.data, create_using=typeG)
            #self.G = nx.from_scipy_sparse_matrix(self.data, typeG)
        return self.G
Project: pymake | Author: dtrckd
def to_directed(self):
        ''' Return an undirected version of the graph where all links are flattened '''
        if self.is_symmetric():
            return self.getG()
        else:
            # nx.to_undirected needs links in both directions.
            return nx.from_numpy_matrix(self.data, create_using=nx.Graph())

    #
    # Get Statistics
    #
Project: nmp_qc | Author: priba
def plot_graph(self, am, position=None, cls=None, fig_name='graph.png'):

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")

            g = nx.from_numpy_matrix(am)

            if position is None:
                position=nx.drawing.circular_layout(g)

            fig = plt.figure()

            if cls is None:
                cls='r'
            else:
                # Make a user-defined colormap.
                cm1 = mcol.LinearSegmentedColormap.from_list("MyCmapName", ["r", "b"])

                # Make a normalizer that will map the time values from
                # [start_time,end_time+1] -> [0,1].
                cnorm = mcol.Normalize(vmin=0, vmax=1)

                # Turn these into an object that can be used to map time values to colors and
                # can be passed to plt.colorbar().
                cpick = cm.ScalarMappable(norm=cnorm, cmap=cm1)
                cpick.set_array([])
                cls = cpick.to_rgba(cls)
                plt.colorbar(cpick, ax=fig.add_subplot(111))


            nx.draw(g, pos=position, node_color=cls, ax=fig.add_subplot(111))

            fig.savefig(os.path.join(self.plotdir, fig_name))
Project: TextRankPlus | Author: zuoxiaolei
def sort_words(vertex_source, edge_source, model, window = 2, pagerank_config = {'alpha': 0.85,}):
    """??????????????

    Keyword arguments:
    vertex_source   --  ???????????????????????????????pagerank????
    edge_source     --  ?????????????????????????????????pagerank???
    window          --  ????????window????????????
    pagerank_config --  pagerank???
    """

    # build the word-index mappings
    sorted_words   = []
    word_index     = {}
    index_word     = {}
    _vertex_source = vertex_source
    _edge_source   = edge_source
    words_number   = 0
    for word_list in _vertex_source:
        for word in word_list:
            if not word in word_index:
                word_index[word] = words_number
                index_word[words_number] = word
                words_number += 1

    graph = np.zeros((words_number, words_number))

    # build the edges
    for word_list in _edge_source:
        for w1, w2 in combine(word_list, window):
            if w1 in word_index and w2 in word_index:
                index1 = word_index[w1]
                index2 = word_index[w2]
                try:
                    similarity = model.similarity(w1,w2)
                    if similarity<0:
                        similarity = 0
                    #print similarity
                except:
                    similarity = 0
                graph[index1][index2] = similarity
                graph[index2][index1] = similarity
#                graph[index1][index2] = 1.0
#                graph[index2][index1] = 1.0

    nx_graph = nx.from_numpy_matrix(graph)

    scores = nx.pagerank(nx_graph, max_iter=100,**pagerank_config)          # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
    for index, score in sorted_scores:
        item = AttrDict(word=index_word[index], weight=score)
        sorted_words.append(item)
    return sorted_words
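The combine() helper used above is not part of this excerpt; in TextRank4ZH-style code it yields every pair of words at most `window` positions apart within a sentence. A plausible sketch (an assumption, not the project's verbatim source):

def combine(word_list, window=2):
    # yield all word pairs whose positions differ by less than `window`
    if window < 2:
        window = 2
    for offset in range(1, window):
        if offset >= len(word_list):
            break
        for pair in zip(word_list, word_list[offset:]):
            yield pair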
Project: sakmapper | Author: szairis
def mapper_graph(df, lens_data=None, lens='pca', resolution=10, gain=0.5, equalize=True, clust='kmeans', stat='db',
                 max_K=5):
    """
    input: N x n_dim image of raw data under the lens function, as a dataframe
    output: (undirected graph, list of node contents, dictionary of patches)
    """
    if lens_data is None:
        lens_data = apply_lens(df, lens=lens)

    patch_clusterings = {}
    counter = 0
    patches = covering_patches(lens_data, resolution=resolution, gain=gain, equalize=equalize)
    for key, patch in patches.items():
        if len(patch) > 0:
            patch_clusterings[key] = optimal_clustering(df, patch, method=clust, statistic=stat, max_K=max_K)
            counter += 1
    print 'total of {} patches required clustering'.format(counter)

    all_clusters = []
    for key in patch_clusterings:
        all_clusters += patch_clusterings[key]
    num_nodes = len(all_clusters)
    print 'this implies {} nodes in the mapper graph'.format(num_nodes)

    A = np.zeros((num_nodes, num_nodes))
    for i in range(num_nodes):
        for j in range(i):
            overlap = set(all_clusters[i]).intersection(set(all_clusters[j]))
            if len(overlap) > 0:
                A[i, j] = 1
                A[j, i] = 1

    G = nx.from_numpy_matrix(A)
    total = []
    all_clusters_new = []
    mapping = {}
    cont = 0
    for m in all_clusters:
        total += m
    for n, m in enumerate(all_clusters):
        if len(m) == 1 and total.count(m) > 1:
            G.remove_node(n)
        else:
            all_clusters_new.append(m)
            mapping[n] = cont
            cont += 1
    H = nx.relabel_nodes(G, mapping)
    return H, all_clusters_new, patches
Project: JustCopy | Author: exe1023
def sort_words(vertex_source, edge_source, window = 2, pagerank_config = {'alpha': 0.85,}):
    """??????????????

    Keyword arguments:
    vertex_source   --  ???????????????????????????????pagerank????
    edge_source     --  ?????????????????????????????????pagerank???
    window          --  ????????window????????????
    pagerank_config --  pagerank???
    """
    sorted_words   = []
    word_index     = {}
    index_word     = {}
    _vertex_source = vertex_source
    _edge_source   = edge_source
    words_number   = 0
    for word_list in _vertex_source:
        for word in word_list:
            if not word in word_index:
                word_index[word] = words_number
                index_word[words_number] = word
                words_number += 1

    graph = np.zeros((words_number, words_number))

    for word_list in _edge_source:
        for w1, w2 in combine(word_list, window):
            if w1 in word_index and w2 in word_index:
                index1 = word_index[w1]
                index2 = word_index[w2]
                graph[index1][index2] = 1.0
                graph[index2][index1] = 1.0

    debug('graph:\n', graph)

    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)          # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
    for index, score in sorted_scores:
        item = AttrDict(word=index_word[index], weight=score)
        sorted_words.append(item)

    return sorted_words
Project: BrainModulyzer | Author: sugeerth
def Find_InterModular_Edge_correlativity(self):
        # The induced graph holds the adjacency matrix of the communities
        self.Matrix = nx.to_numpy_matrix(self.induced_graph)
        # Before calculating the correlation strength, keep only the lower
        # triangle of the matrix; the upper half is redundant for a symmetric
        # matrix, and discarding it saves computation
        self.Matrix = np.tril(self.Matrix,-1)
        i=0
        Sum = 0 
        j=0 
        SumTemp = 0
        Edges = 0 
        nodes1 = [item for item in self.Graphwidget.scene().items() if isinstance(item, Node)]
        # iterating over the community indices
        for community in set(self.partition.values()):
            i= i + 1
            j=0
            for community2 in set(self.partition.values()):
                j= j + 1
                # Skip pairs where a community is compared with itself
                if community == community2:
                    continue
                # Calculating the correlation strength only with the lower half of the adjacency matrix
                if i <= j: 
                    continue
                # list_nodes1 and list_nodes2 indicate which nodes are actually present in these communities
                list_nodes1 = [nodes for nodes in self.partition.keys() if self.partition[nodes] == community]
                list_nodes2 = [nodes for nodes in self.partition.keys() if self.partition[nodes] == community2]
                # Re-initialize the accumulators
                SumTemp = 0
                Edges = 0
                for node1 in nodes1:
                    if node1.counter-1 in list_nodes1:
                        for node2 in nodes1:
                            if node2.counter-1 in list_nodes2:
                                if node1.counter-1 == node2.counter-1:
                                        continue
                                if self.Graphwidget.Graph_data().ThresholdData[node1.counter-1][node2.counter-1] > 0:
                                    Edges = Edges + 1
                if Edges != 0: 
                    Sum=float("{0:.2f}".format(self.Matrix[i-1,j-1]/Edges))
                self.Matrix[i-1,j-1] = Sum

        self.induced_graph = nx.from_numpy_matrix(self.Matrix)
Project: dyfunconn | Author: makism
def threshold_shortest_paths(mtx, treatment=False):
    """ Threshold a graph via  via shortest path identification using Dijkstra's algorithm.

    .. [Dimitriadis2010] Dimitriadis, S. I., Laskaris, N. A., Tsirka, V., Vourkas, M., Micheloyannis, S., & Fotopoulos, S. (2010). Tracking brain dynamics via time-dependent network analysis. Journal of neuroscience methods, 193(1), 145-155.



    Parameters
    ----------
    mtx : array-like, shape(N, N)
        Symmetric, weighted and undirected connectivity matrix.

    treatment : boolean
        Convert the weights to distances by inverting the matrix entries and
        fill the diagonal with zeroes. Default `False`.


    Returns
    -------
    binary_mtx : array-like, shape(N, N)
        A binary mask matrix.
    """
    imtx = mtx
    if treatment:
        imtx = 1.0 / mtx
        np.fill_diagonal(imtx, 0.0)

    binary_mtx = np.zeros_like(imtx, dtype=np.int32)

    graph = nx.from_numpy_matrix(imtx)
    paths = dict(nx.all_pairs_dijkstra_path(graph))

    N, _ = np.shape(mtx)

    for x in range(N):
        for y in range(N):
            r_path = paths[x][y]
            num_nodes = len(r_path)

            ind1 = -1
            ind2 = -1
            for m in range(0, num_nodes - 1):
                ind1 = ind1 + 1
                ind2 = ind1 + 1

                binary_mtx[r_path[ind1], r_path[ind2]] = 1
                binary_mtx[r_path[ind2], r_path[ind1]] = 1

    return binary_mtx
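A toy run (hypothetical 3-node matrix whose weights are already distances): the direct 0-2 connection costs 10 while the detour through node 1 costs 2, so only edges lying on shortest paths survive in the mask.

import numpy as np

mtx = np.array([[0.0, 1.0, 10.0],
                [1.0, 0.0, 1.0],
                [10.0, 1.0, 0.0]])
print(threshold_shortest_paths(mtx))
# [[0 1 0]
#  [1 0 1]
#  [0 1 0]]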
Project: AIZooService | Author: zhanglbjames
def sort_words(vertex_source, edge_source, window = 2, pagerank_config = {'alpha': 0.85,}):
    """??????????????

    Keyword arguments:
    vertex_source   --  ???????????????????????????????pagerank????
    edge_source     --  ?????????????????????????????????pagerank???
    window          --  ????????window????????????
    pagerank_config --  pagerank???
    """
    sorted_words   = []
    word_index     = {}
    index_word     = {}
    _vertex_source = vertex_source
    _edge_source   = edge_source
    words_number   = 0
    for word_list in _vertex_source:
        for word in word_list:
            if not word in word_index:
                word_index[word] = words_number
                index_word[words_number] = word
                words_number += 1

    graph = np.zeros((words_number, words_number))

    for word_list in _edge_source:
        for w1, w2 in combine(word_list, window):
            if w1 in word_index and w2 in word_index:
                index1 = word_index[w1]
                index2 = word_index[w2]
                graph[index1][index2] = 1.0
                graph[index2][index1] = 1.0

    debug('graph:\n', graph)

    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)          # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
    for index, score in sorted_scores:
        item = AttrDict(word=index_word[index], weight=score)
        sorted_words.append(item)

    return sorted_words
Project: Graduation-design | Author: Baichenjia
def build_matrix():
    # Build the word-index mappings
    word_index = {}  # word -> index
    index_word = {}  # index -> word
    weibo_data = handel_weibo_data()  # load the preprocessed Weibo sentences
    index = 0
    for sent in weibo_data:  # iterate over sentences
        for word in sent:   # iterate over the words of each sentence
            if not word in word_index.keys():
                word_index[word] = index
                index_word[index] = word
                index += 1
    words_number = index
    #print "words_number", words_number
    # Build the co-occurrence matrix
    graph = np.zeros((words_number, words_number))  # adjacency matrix
    for word_list in weibo_data:  # build the edges
        for i in range(len(word_list)):  # any two words in the same sentence are treated as co-occurring
            for j in range(i, len(word_list)):
                w1 = word_list[i]
                w2 = word_list[j]  # the two words of the pair
                index1 = word_index[w1]
                index2 = word_index[w2]
                graph[index1][index2] += 1   # increment the co-occurrence count
                graph[index2][index1] += 1   # keep the matrix symmetric
    # Rank the words by running networkx's PageRank on the co-occurrence graph
    nx_graph = nx.from_numpy_matrix(graph)  # convert to a networkx graph
    scores = nx.pagerank(nx_graph, alpha=0.85)  # compute PageRank scores
    sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)  # sort by score, descending
    key_words = []  # the ranked keywords
    for index, score in sorted_scores:
        # skip filtered words and single-character tokens
        if index_word[index] == u'??' or index_word[index] == u'??' or len(index_word[index]) == 1:
            continue
        key_words.append((index_word[index], score))
    # Write the top 100 keywords to a result file
    fp_textrank_result = open('f://emotion/mysite/Label_extract/result_textrank.txt', 'w+')
    for i in range(100):
        fp_textrank_result.write(key_words[i][0] + ' ' + str(round(key_words[i][1], 10)))
        fp_textrank_result.write('\n')
    fp_textrank_result.close()
    """
    fp_test = open('f://emotion/mysite/Label_extract/test.txt', 'w+')
    for i in range(100):
        fp_test.write(key_words[i][0] + '?')
    fp_test.close()
    """
    print "textrank key word calculate is success..."
    return key_words
Project: pymake | Author: dtrckd
def plot_ibp(model, target_dir=None, block=False, columns=[0], separate=False, K=4):

    G = nx.from_numpy_matrix(model.Y(), nx.DiGraph())
    F = model.leftordered()
    W = model._W

    # Plot Adjacency Matrix
    draw_adjmat(model._Y)
    # Plot Log likelihood
    plot_csv(target_dir=target_dir, columns=columns, separate=separate)
    #W[np.where(np.logical_and(W>-1.6, W<1.6))] = 0
    #W[W <= -1.6]= -1
    #W[W >= 1.6] = 1

    # KMeans test
    clusters = kmeans(F, K=K)
    nodelist_kmeans = [k[0] for k in sorted(zip(range(len(clusters)), clusters), key=lambda k: k[1])]
    adj_mat_kmeans = nx.adjacency_matrix(G, nodelist=nodelist_kmeans).A
    draw_adjmat(adj_mat_kmeans, title='KMeans on feature matrix')
    # Adjacency matrix generation
    draw_adjmat(model.generate(nodelist_kmeans), title='Generated Y from ILFRM')

    # training Rescal
    R = rescal(model._Y, K)
    R = R[nodelist_kmeans, :][:, nodelist_kmeans]
    draw_adjmat(R, 'Rescal generated')

    # Networks Plots
    f = plt.figure()

    ax = f.add_subplot(121)
    title = 'Features matrix, K = %d' % model._K
    ax.set_title(title)
    ColorMap(F, pixelspervalue=5, title=title, ax=ax)

    ax = f.add_subplot(122)
    ax.set_title('W')
    img = ax.imshow(W, interpolation='None')
    plt.colorbar(img)

    f = plt.figure()
    ax = f.add_subplot(221)
    ax.set_title('Spectral')
    nx.draw_spectral(G, axes=ax)
    ax = f.add_subplot(222)
    ax.set_title('Spring')
    nx.draw(G, axes=ax)
    ax = f.add_subplot(223)
    ax.set_title('Random')
    nx.draw_random(G, axes=ax)
    ax = f.add_subplot(224)
    ax.set_title('graphviz')
    try:
        nx.draw_graphviz(G, axes=ax)
    except:
        pass

    display(block=block)
Project: my_experiment | Author: Giuliao
def init_network(self, vertex_num=5, p=0.9, directed=False, file_name=None, adjMatrix=None):
        """ init the network by reading a file

        :param file_name: 
                the first line is the number of vertex
                the next lines of which the first number is the vertex as 
                the start point then the next are the end respectively
        :param vertex_num:
        :param p:

        :return:

        """
        local_adjMatrix = adjMatrix
        if not file_name:
            # init by random
            # local_list = np.random.permutation(vertex_num)
            # local_adjMatrix = np.zeros([vertex_num, vertex_num], dtype=np.int)
            #
            # for index, var in enumerate(local_list):
            #     if index == vertex_num - 1:
            #         break
            #
            #     kk = np.random.randint(0, 2)  # control the direction of a matrix
            #     if kk == 0:
            #         local_adjMatrix[local_list[index]][local_list[index+1]] = 1
            #     else:
            #         local_adjMatrix[local_list[index+1]][local_list[index]] = 1
            #
            # m = np.random.randint(1, vertex_num*vertex_num*vertex_num)  # control the density of the matrix
            # for i in range(m):
            #     p1 = np.random.randint(0, vertex_num)
            #     p2 = np.random.randint(0, vertex_num)
            #     while p1 == p2:
            #         p2 = np.random.randint(0, vertex_num)
            #     local_adjMatrix[p1][p2] = 1
            if local_adjMatrix is None:
                local_G = nx.binomial_graph(vertex_num, p, directed=directed)
            else:
                local_G = nx.from_numpy_matrix(local_adjMatrix)
                self.vertex_num = local_adjMatrix.shape[0]

        else:
            # init by file
            with open(file_name, 'r') as fd:
                for line in fd.readlines():
                    tt = line.split(' ')

                    if len(tt) == 1:
                        vv = int(tt[0])
                        local_adjMatrix = np.zeros([vv, vv], dtype=np.int)
                        self.vertex_num = vv
                        continue

                    for i in range(1, len(tt)):
                        local_adjMatrix[int(tt[0])-1][int(tt[i])-1] = 1

            local_G = nx.from_numpy_matrix(local_adjMatrix)

        return local_G
Project: TextAsGraphClassification | Author: NightmareNyx
def test():
    mnist = input_data.read_data_sets("MINST_data", one_hot=False)
    train_data = mnist.train.images.astype(np.float32)
    fraction = 50
    train_labels = mnist.train._labels[:fraction]
    with open('sugbgraphs_labels.pickle', 'wb') as f:
        pickle.dump(train_labels, f)

    test_data = mnist.test.images.astype(np.float32)
    print(train_data.shape)
    patch_size = 4
    n_ids = range(patch_size * patch_size)
    A = np.ones((patch_size * patch_size, patch_size * patch_size))
    np.fill_diagonal(A, 0)
    cc = 0
    train = []

    bins = list(np.linspace(0.0, 1.0, 10))
    for sample in train_data[:fraction]:
        sample = sample.reshape((28, 28))
        sugbg = []
        patches = image.extract_patches_2d(sample, (patch_size, patch_size))
        cc += 1
        for p in patches:
            if np.sum(p) == 0:
                continue
            G1 = nx.from_numpy_matrix(A)
            dictionary = dict(zip(n_ids, np.digitize(p.flatten(), bins)))
            nx.set_node_attributes(G1, 'label', dictionary)
            sugbg.append(G1)
        train.append(sugbg)
        print(cc)

    with open('sugbgraphs_train.pickle', 'wb') as f:
        pickle.dump(train, f)

    del train
    test = []
    for sample in test_data[:5]:
        sample = sample.reshape((28, 28))
        sugbg = []
        patches = image.extract_patches_2d(sample, (patch_size, patch_size))
        for p in patches:
            if np.sum(p) == 0:
                continue

            G1 = nx.from_numpy_matrix(A)
            p = np.histogram(p.flatten(), bins=np.linspace(0.0, 1.0, 10))[0]
            dictionary = dict(zip(n_ids, p))
            nx.set_node_attributes(G1, 'label', dictionary)
            sugbg.append(G1)
        test.append(sugbg)
    with open('sugbgraphs_test.pickle', 'wb') as f:
        pickle.dump(test, f)  # dump the full test list