Python networkx module: closeness_centrality() example source code

We extracted the following 15 code examples from open-source Python projects to illustrate how to use networkx.closeness_centrality().
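A minimal, self-contained sketch of the basic call (the example graph is illustrative only, not taken from any of the projects below):

import networkx as nx

G = nx.karate_club_graph()
closeness = nx.closeness_centrality(G)     # dict: node -> closeness score
print(max(closeness, key=closeness.get))   # most central node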

Project: Visualization-of-popular-algorithms-in-Python    Author: MUSoC    | project source | file source
def CentralityMeasures(G):
    # Betweenness centrality
    bet_cen = nx.betweenness_centrality(G)
    # Closeness centrality
    clo_cen = nx.closeness_centrality(G)
    # Eigenvector centrality
    eig_cen = nx.eigenvector_centrality(G)
    # Degree centrality
    deg_cen = nx.degree_centrality(G)
    #print(bet_cen, clo_cen, eig_cen)
    print("# Betweenness centrality:" + str(bet_cen))
    print("# Closeness centrality:" + str(clo_cen))
    print("# Eigenvector centrality:" + str(eig_cen))
    print("# Degree centrality:" + str(deg_cen))


#main function
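
The extract above stops at its entry point; a minimal, hypothetical driver (not part of the original project) might look like this:

# Hypothetical driver: build a small graph and print its centrality measures.
import networkx as nx

if __name__ == '__main__':
    G = nx.karate_club_graph()   # any connected graph works here
    CentralityMeasures(G)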
Project: PhD    Author: wutaoadeny    | project source | file source
def Nodes_Ranking(G, index):
    #Katz_Centrality = nx.katz_centrality(G)
    #print "Katz_Centrality:", sorted(Katz_Centrality.iteritems(), key=lambda d:d[1], reverse = True)
    #Page_Rank(G)
    if index == "degree_centrality":
        return Degree_Centrality(G)
    if index == "degree_mass_Centrality":
        return Degree_Mass_Centrality(G)
    if index == "between_centrality":
        return Between_Centrality(G)
    if index == "closeness_centrality":
        return Closeness_Centrality(G)
    if index == "kshell_centrality":
        return KShell_Centrality(G)
    if index == "eigen_centrality":
        return Eigen_Centrality_Andy(G)
    if index == "collective_influence":
        return Collective_Influence(G)
    if index == "enhanced_collective_centrality":
        return Enhanced_Collective_Influence(G)
    if index == "hybrid_diffusion_centrality":
        return Hybrid_Diffusion_Centrality(G)
Project: PedWorks    Author: BrnCPrz    | project source | file source
def calculate_closeness(graph):
    print "\n\tCalculating Closeness Centrality..."
    g = graph
    clo = nx.closeness_centrality(g)
    # networkx >= 2.0 takes (G, values, name); 1.x used (G, name, values)
    nx.set_node_attributes(g, clo, 'closeness')
    degclos_sorted = sorted(clo.items(), key=itemgetter(1), reverse=True)
    for key, value in degclos_sorted[0:10]:
        print "\t   > ", key, round(value, 4)
    return g, clo
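
A hypothetical call site, assuming calculate_closeness() and its module imports (networkx as nx, operator.itemgetter) are in scope:

# Hypothetical usage: annotate a toy graph and read back one score.
import networkx as nx

g, clo = calculate_closeness(nx.karate_club_graph())
print(g.nodes[0]['closeness'])   # node attribute set by the function above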
Project: BrainModulyzer    Author: sugeerth    | project source | file source
def UpdateThresholdDegree(self):
        self.g =  self.Graph_data().DrawHighlightedGraph(self.EdgeSliderValue)

        # Degree centrality for the nodes involved
        self.Centrality=nx.degree_centrality(self.g)
        self.Betweeness=nx.betweenness_centrality(self.g)  
        self.ParticipationCoefficient = self.communityDetectionEngine.participation_coefficient(self.g,True)
        self.LoadCentrality = nx.load_centrality(self.g)
        self.ClosenessCentrality = nx.closeness_centrality(self.g)

        for i in range(len(self.ParticipationCoefficient)):
            if np.isnan(float(self.ParticipationCoefficient[i])):
                self.ParticipationCoefficient[i] = 0

        i = 0
        """ Calculate rank and Zscore """
        MetrixDataStructure=eval('self.'+self.nodeSizeFactor)

        from collections import OrderedDict
        self.sortedValues = OrderedDict(sorted(MetrixDataStructure.items(), key=lambda x:x[1]))

        self.average = np.average(list(self.sortedValues.values()))   # list() for Python 3 dict views
        self.std = np.std(list(self.sortedValues.values()))

        for item in self.scene().items():
            if isinstance(item, Node):
                Size = eval('self.'+self.nodeSizeFactor+'[i]')
                rank, Zscore = self.calculateRankAndZscore(i)
                item.setNodeSize(Size,self.nodeSizeFactor,rank,Zscore)    
                i = i + 1

        self.ThresholdChange.emit(True)
        if not(self.ColorNodesBasedOnCorrelation): 
            self.Ui.communityLevelLineEdit.setText(str(self.level))
            self.DendoGramDepth.emit(self.level)

        self.Refresh()
Project: ocean    Author: worldoss    | project source | file source
def central_list(E):
    centralities = []
    centralities.append(nx.in_degree_centrality(E))
    centralities.append(nx.out_degree_centrality(E))
    centralities.append(nx.closeness_centrality(E))
    centralities.append(nx.betweenness_centrality(E))
    centralities.append(nx.eigenvector_centrality(E))

    for node in E.nodes():  # nodes_iter() was removed in networkx 2.x
      measures = "\t".join(str(c[node]) for c in centralities)
      print("%s: %s" % (node, measures))
Project: PhD    Author: wutaoadeny    | project source | file source
def Closeness_Centrality(G):
    Closeness_Centrality = nx.closeness_centrality(G)
    #print "Closeness_Centrality:", sorted(Closeness_Centrality.iteritems(), key=lambda d:d[1], reverse = True)
    return Closeness_Centrality
Project: PhD    Author: wutaoadeny    | project source | file source
def Nodes_Ranking(G, index):
    if index == "degree_centrality":
        return Degree_Centrality(G)
    if index == "between_centrality":
        return Between_Centrality(G)
    if index == "closeness_centrality":
        return Closeness_Centrality(G)
    if index == "pagerank_centrality":
        return Page_Rank(G)
    if index == "kshell_centrality":
        return KShell_Centrality(G)
    if index == "collective_influence":
        return Collective_Influence(G)
    if index == "enhanced_collective_centrality":
        return Enhanced_Collective_Influence(G)

    if index == "eigen_centrality":
        return Eigen_Centrality_Avg(G) #Eigen_Centrality_Andy(G)

    if index == "md_eigen_centrality":
        return MD_Eigen_Centrality_Andy(G)
    if index == "hc_eigen_centrality":
        return HC_Eigen_Centrality_Andy(G)

    #if index == "hybrid_diffusion_centrality":
    #    return Hybrid_Diffusion_Centrality(G)


    if index == "PIR_Centrality": #i.e. weighted_hybrid_diffusion_centrality
        return PIR_Centrality_Avg(G) #Weighted_Hybrid_Diffusion_Centrality(G)
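
A hypothetical call through this dispatcher, assuming the helper scorers (e.g. Closeness_Centrality, shown elsewhere on this page) are defined in the same module:

# Hypothetical usage: rank nodes by closeness and show the top five.
import networkx as nx

G = nx.karate_club_graph()
scores = Nodes_Ranking(G, "closeness_centrality")
print(sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:5])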
Project: synchrony    Author: cknd    | project source | file source
def statistics(self):
        """Return some topological information about the experiment"""
        stat = {}
        stat["net diameter"] = nx.diameter(self.network)
        stat["net radius"]   = nx.radius(self.network)
        stat["net asp"]     = nx.average_shortest_path_length(self.network)
        stat["input asp"] = net.inputASL(self.network, self.inputc)
        centrs = nx.closeness_centrality(self.network)   # compute once, reuse for every measure
        for m in self.measures.values():
            distr = net.distances_to_roi(self.network, self.inputc, m.roi)
            stat["stim to roi distances, mean", m.name] = np.mean(distr)
            stat["stim to roi distances, var", m.name] = np.var(distr)
            stat["roi centralities", m.name] = [centrs[tuple(node)]
                                                for node in np.transpose(m.roi.nonzero())]
        return stat
Project: synchrony    Author: cknd    | project source | file source
def createroiidxs(network,distance):
    """
    Choose two central nodes, some distance apart, and return their (i,j) indices.

    Args:
        network: networkx graph
        distance: how far apart the two nodes should be.

    Returns:
        A tuple of two (i,j) indices / node labels
    """
    nodes,centralities = zip(*nx.closeness_centrality(network).items())
    # sort nodes from most central to least central:
    centr_arxs = np.argsort(centralities)
    nodes_sorted = [n for n in reversed(np.array(nodes)[centr_arxs])]
    k = 0
    while k<len(nodes_sorted):
        # pick some node in the middle of the graph (high centrality)
        middlenode = tuple(nodes_sorted[k])
        # now pick the most central node that meets the given distance criterion.
        # (since we don't want to end up near the boundaries)
        for n in nodes_sorted:
            if nx.shortest_path_length(network,middlenode,tuple(n)) == distance:
                return middlenode,tuple(n)
        # if that didn't work, try starting with a different, less central middlenode.
        k = k+1
    raise Exception("speficied distance to high for this network")
Project: AdjMatrix-Generation    Author: weiyiliuIBM    | project source | file source
def main(filename, type, constructed_graph = -1):
    # 1. original graph
    original_graph_path = os.path.join("data",filename,"")
    original_graph = generate_graph(original_graph_path,filename,-1)
    plt.figure("original graph degree distribution")
    draw_degree(original_graph)
    print('original edge number: ',len(original_graph.edges()))


    # 2. reconstruct graph
    if constructed_graph == -1:
        reconstruct_graph_path = os.path.join("reconstruction", filename, type,"")
        reconstruct_graph_adj = pickle.load(open(glob.glob(reconstruct_graph_path+"*.adj")[0],'rb'))
    else:
        reconstruct_graph_adj = constructed_graph
    reconstruct_graph = adj2Graph(reconstruct_graph_adj, edgesNumber = len(original_graph.edges()))
    print('edge number: ', len(reconstruct_graph.edges()))
    plt.figure("reconstruct graph degree distribution")
    draw_degree(reconstruct_graph)

    print("Clustering: ",nx.average_clustering(original_graph), ' ', nx.average_clustering(reconstruct_graph))
    # print("Diameter: ", nx.average_shortest_path_length(original_graph), ' ', nx.average_shortest_path_length(reconstruct_graph))
    # print("degree centrality: ", nx.degree_centrality(original_graph), ' ',  nx.degree_centrality(reconstruct_graph))
    #print("closeness centrality: ", nx.closeness_centrality(original_graph), ' ', nx.closeness_centrality(reconstruct_graph))

    plt.show()
Project: anomalous-vertices-detection    Author: Kagandi    | project source | file source
def closeness(self):
        """
        Parameters
        ----------

        Returns
        -------
        NxGraph: Graph object

        Examples
        --------
        >>>
        """
        return nx.closeness_centrality(self._graph)
Project: Quadflor    Author: quadflor    | project source | file source
def __init__(self, method='degree', analyzer=NltkNormalizer().split_and_normalize):
        self.analyze = analyzer
        self.method = method
        self.methods_on_digraph = {'hits', 'pagerank', 'katz'}
        self._get_scores = {'degree': nx.degree, 'betweenness': nx.betweenness_centrality,
                            'pagerank': nx.pagerank_scipy, 'hits': self._hits, 'closeness': nx.closeness_centrality,
                            'katz': nx.katz_centrality}[method]
        # Add a new value when a new vocabulary item is seen
        self.vocabulary = defaultdict()
        self.vocabulary.default_factory = self.vocabulary.__len__
Project: sceneTransitionNetMovieClassification    Author: daltonsi    | project source | file source
def graph_info(g):
    result = {}
    # strongly_connected_component_subgraphs() was removed in networkx 2.4
    components = [g.subgraph(c).copy() for c in nx.strongly_connected_components(g)]
    in_degrees = dict(g.in_degree())     # dict() so the degree views are subscriptable
    out_degrees = dict(g.out_degree())
    highest_in_degree_node = sorted(in_degrees, key = lambda x: in_degrees[x], reverse = True)[0]
    highest_out_degree_node = sorted(out_degrees, key = lambda x: out_degrees[x], reverse = True)[0]

    result['highest in_degree node'] = highest_in_degree_node
    result['highest out_degree_node'] = highest_out_degree_node

    result['number of components'] = len(components)
    result['number of nodes'] = g.number_of_nodes()
    result['number of edges'] = g.number_of_edges()
#Degree centrality
    in_degree_centrality = nx.in_degree_centrality(g)
    out_degree_centrality = nx.out_degree_centrality(g)
    result['sorted in_degree centrality'] = sorted([(el,in_degree_centrality[el]) for el in g.nodes()], key = lambda x: x[1], reverse = True)
    result['sorted out_degree centrality'] = sorted([(el,out_degree_centrality[el]) for el in g.nodes()], key = lambda x: x[1], reverse = True)

    closeness = nx.closeness_centrality(g)   # compute once instead of once per node
    result['closeness_centrality'] = sorted(closeness.items(), key = lambda x: x[1], reverse = True)
    result['highest in_degree node closeness'] = closeness[highest_in_degree_node]
    result['highest out_degree node closeness'] = closeness[highest_out_degree_node]

    betweenness = nx.betweenness_centrality(g)   # likewise computed once
    result['betweenness centrality'] = sorted(betweenness.items(), key = lambda x: x[1], reverse = True)
    result['highest in_degree node betweenness'] = betweenness[highest_in_degree_node]
    result['highest out_degree node betweenness'] = betweenness[highest_out_degree_node]


    largest_component = sorted(components, key = lambda x: x.number_of_nodes(), reverse = True)[0]

    result['largest strongly component percent'] = largest_component.number_of_nodes()/float(g.number_of_nodes())
    result['largest strongly component diameter'] = nx.diameter(largest_component)
    result['largest strongly component average path length'] = nx.average_shortest_path_length(largest_component)
    result['average_degree (undirected)'] = sum(dict(g.degree()).values())/float(g.number_of_nodes())
    result['avg_cluster_coefficient (transitivity)'] = nx.transitivity(g)
    return result
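
A hypothetical smoke test for graph_info(), using a strongly connected toy digraph so that the diameter and average-path-length calls succeed:

# Hypothetical usage (not from the original project).
import networkx as nx

g = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1)])   # strongly connected
info = graph_info(g)
print(info['number of nodes'], info['number of edges'])
print(info['closeness_centrality'][:3])   # top-3 nodes by closeness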
Project: BrainModulyzer    Author: sugeerth    | project source | file source
def changeLayout(self,Layout='sfdp'):
        Layout = (Layout.encode('ascii','ignore')).replace(' ','')
        self.g =  self.Graph_data().DrawHighlightedGraph(self.EdgeSliderValue)

        # asking community detection Engine to compute the Layout
        self.pos,Factor = self.communityDetectionEngine.communityLayoutCalculation(Layout,self.g)

        # Degree centrality for the nodes involved
        self.Centrality=nx.degree_centrality(self.g)
        self.Betweeness=nx.betweenness_centrality(self.g)  
        self.LoadCentrality = nx.load_centrality(self.g)
        self.ParticipationCoefficient = self.communityDetectionEngine.participation_coefficient(self.g,True)
        self.ClosenessCentrality = nx.closeness_centrality(self.g)

        for i in range(len(self.ParticipationCoefficient)):
            if np.isnan(float(self.ParticipationCoefficient[i])):
                self.ParticipationCoefficient[i] = 0
        i = 0 

        """ Calculate rank and Zscore """
        MetrixDataStructure=eval('self.'+self.nodeSizeFactor)
        from collections import OrderedDict

        self.sortedValues = OrderedDict(sorted(MetrixDataStructure.items(), key=lambda x:x[1]))
        self.average = np.average(list(self.sortedValues.values()))   # list() for Python 3 dict views
        self.std = np.std(list(self.sortedValues.values()))

        for item in self.scene().items():
            if isinstance(item, Node):
                x,y=self.pos[i]
                item.setPos(QtCore.QPointF(x,y)*Factor)
                Size = eval('self.'+self.nodeSizeFactor+'[i]')
                rank, Zscore = self.calculateRankAndZscore(i)
                item.setNodeSize(Size,self.nodeSizeFactor,rank,Zscore)
                i = i + 1

        for edge in self.edges:
            edge().adjust()

        self.Refresh()

        if not(self.PositionPreserve):
            self.Scene_to_be_updated.setSceneRect(self.Scene_to_be_updated.itemsBoundingRect())
            self.setScene(self.Scene_to_be_updated)

        self.fitInView(self.Scene_to_be_updated.itemsBoundingRect(),QtCore.Qt.KeepAspectRatio)
        self.Scene_to_be_updated.update()
Project: analyse_website_dns    Author: mrcheng0910    | project source | file source
def main():
    domain_name = 'baidu.com'
    domain_pkts = get_data(domain_name)
    node_cname, node_ip, visit_total, edges, node_main = get_ip_cname(domain_pkts[0]['details'])
    for i in domain_pkts[0]['details']:
        for v in i['answers']:
            edges.append((v['domain_name'],v['dm_data']))

    DG = nx.DiGraph()
    DG.add_edges_from(edges)

    # main-domain nodes whose successors are all IP nodes (i.e. resolve directly to IPs)
    for node in DG:
        if node in node_main and all(s in node_ip for s in DG.successors(node)):
            print(node)

    # CNAME nodes that resolve to IPs rather than to further CNAMEs
    for node in DG:
        if node in node_cname and all(s not in node_cname for s in DG.successors(node)):
            print("node", DG.out_degree(node), DG.in_degree(node), DG.degree(node))
    # in-degree of CNAME nodes (commented out)
    # for node in DG:
    #     if node in node_cname and DG.predecessors(node) not in node_cname:
    #         print(len(DG.predecessors(node)))

    for node in DG:
        if node in node_main:
            if len(list(DG.successors(node))) == 3:   # successors() is an iterator in networkx 2.x
                print(node)
                print(list(DG.successors(node)))
    # print sorted(nx.degree(DG).values())

    print(nx.degree_assortativity_coefficient(DG))
    average_degree = sum(dict(DG.degree()).values()) / (len(node_cname) + len(node_ip) + len(node_main))
    print(average_degree)
    print(len(node_cname) + len(node_ip) + len(node_main))
    print(len(edges))
    print(nx.degree_histogram(DG))
    # print nx.degree_centrality(DG)
    # print nx.in_degree_centrality(DG)
    # print nx.out_degree_centrality(DG)
    # print nx.closeness_centrality(DG)
    # print nx.load_centrality(DG)