Python networkx 模块,betweenness_centrality() 实例源码

我们从Python开源项目中,提取了以下18个代码示例,用于说明如何使用networkx.betweenness_centrality()。

项目:Visualization-of-popular-algorithms-in-Python    作者:MUSoC    | 项目源码 | 文件源码
def CentralityMeasures(G):
    """Compute and print four standard centrality measures for graph G.

    Parameters
    ----------
    G : networkx graph
        The graph to analyse.
    """
    # Betweenness centrality: fraction of shortest paths through each node.
    bet_cen = nx.betweenness_centrality(G)
    # Closeness centrality: inverse average shortest-path distance.
    clo_cen = nx.closeness_centrality(G)
    # Eigenvector centrality: importance weighted by neighbours' importance.
    eig_cen = nx.eigenvector_centrality(G)
    # Degree centrality: degree normalised by (n - 1).
    deg_cen = nx.degree_centrality(G)
    # Parenthesised print of a single string works identically on
    # Python 2 and 3; the original Python-2-only print statements
    # are a SyntaxError on Python 3.
    print("# Betweenness centrality:" + str(bet_cen))
    print("# Closeness centrality:" + str(clo_cen))
    print("# Eigenvector centrality:" + str(eig_cen))
    print("# Degree centrality:" + str(deg_cen))


#main function
项目:Visualization-of-popular-algorithms-in-Python    作者:MUSoC    | 项目源码 | 文件源码
def CentralityMeasures(G):
    """Compute and print four standard centrality measures for graph G.

    Parameters
    ----------
    G : networkx graph
        The graph to analyse.
    """
    # Betweenness centrality: fraction of shortest paths through each node.
    bet_cen = nx.betweenness_centrality(G)
    # Closeness centrality: inverse average shortest-path distance.
    clo_cen = nx.closeness_centrality(G)
    # Eigenvector centrality: importance weighted by neighbours' importance.
    eig_cen = nx.eigenvector_centrality(G)
    # Degree centrality: degree normalised by (n - 1).
    deg_cen = nx.degree_centrality(G)
    # Parenthesised print of a single string works identically on
    # Python 2 and 3; the original Python-2-only print statements
    # are a SyntaxError on Python 3.
    print("# Betweenness centrality:" + str(bet_cen))
    print("# Closeness centrality:" + str(clo_cen))
    print("# Eigenvector centrality:" + str(eig_cen))
    print("# Degree centrality:" + str(deg_cen))


#main function
项目:Visualization-of-popular-algorithms-in-Python    作者:MUSoC    | 项目源码 | 文件源码
def CentralityMeasures(G):
    """Compute and print four standard centrality measures for graph G.

    Parameters
    ----------
    G : networkx graph
        The graph to analyse.
    """
    # Betweenness centrality: fraction of shortest paths through each node.
    bet_cen = nx.betweenness_centrality(G)
    # Closeness centrality: inverse average shortest-path distance.
    clo_cen = nx.closeness_centrality(G)
    # Eigenvector centrality: importance weighted by neighbours' importance.
    eig_cen = nx.eigenvector_centrality(G)
    # Degree centrality: degree normalised by (n - 1).
    deg_cen = nx.degree_centrality(G)
    # Parenthesised print of a single string works identically on
    # Python 2 and 3; the original Python-2-only print statements
    # are a SyntaxError on Python 3.
    print("# Betweenness centrality:" + str(bet_cen))
    print("# Closeness centrality:" + str(clo_cen))
    print("# Eigenvector centrality:" + str(eig_cen))
    print("# Degree centrality:" + str(deg_cen))


#main function
项目:academic    作者:xinchrome    | 项目源码 | 文件源码
def authorNet_feature():
    """Build the co-author graph and dump a per-author centrality dict.

    Loads the pickled ``coauthor`` adjacency mapping from ``cspath``,
    persists the derived node/edge sets, and writes an ``author_cen``
    pickle mapping every author to a centrality score.

    NOTE(review): the real betweenness computation is commented out below
    because it is expensive; every author currently receives the
    placeholder score 1 -- confirm this is intended before relying on
    this feature.
    """
    # Context managers close the files (the originals were never closed).
    with open(cspath + "coauthor", "rb") as fh:
        authorCo = pickle.load(fh)
    nodeSet = set()
    edgeSet = set()
    # .items() instead of Python-2-only .iteritems() so this also runs
    # on Python 3.
    for key, val in authorCo.items():
        nodeSet.add(key)
        # Skip self-loops.
        edgeSet.update([(key, item) for item in val if item != key])
    with open(cspath + "co_nodeSet", "wb") as fh:
        pickle.dump(nodeSet, fh)
    with open(cspath + "co_edgeSet", "wb") as fh:
        pickle.dump(edgeSet, fh)
    g = nx.Graph()
    g.add_nodes_from(nodeSet)
    g.add_edges_from(edgeSet)

    # Placeholder: constant score 1 for every node.
    clo_cen = defaultdict(int)
    for node in g.nodes():
        clo_cen[node] = 1

    # Real (sampled) betweenness centrality -- too slow to run routinely:
    #clo_cen = nx.betweenness_centrality(g, k=int(len(g.nodes())/5))
    with open(cspath + "author_cen", "wb") as fh:
        pickle.dump(clo_cen, fh)
    print('authorNet_feature finish')
项目:PedWorks    作者:BrnCPrz    | 项目源码 | 文件源码
def calculate_betweenness(graph):
    """Attach betweenness centrality to *graph* and print the top-10 nodes.

    Centrality is computed on the undirected projection of the graph and
    stored on each node under the 'betweenness' attribute.

    Returns
    -------
    (graph, dict)
        The annotated graph and the raw node -> betweenness mapping.
    """
    print("\n\tCalculating Betweenness Centrality...")
    g = graph
    bc = nx.betweenness_centrality(g.to_undirected())
    # NOTE(review): argument order matches networkx 1.x
    # set_node_attributes(G, name, values); networkx >= 2.0 expects
    # set_node_attributes(G, values, name) -- confirm the pinned version.
    nx.set_node_attributes(g, 'betweenness', bc)
    degbetw_sorted = sorted(bc.items(), key=itemgetter(1), reverse=True)
    for key, value in degbetw_sorted[0:10]:
        print("\t   > ", key, round(value, 4))
    return g, bc
项目:2020plus    作者:KarchinLab    | 项目源码 | 文件源码
def main(opts):
    """Compute betweenness and degree for every gene in a BioGRID network.

    Parameters
    ----------
    opts : dict
        'biogrid': path to a tab-separated BioGRID interaction file;
        'output': path for the resulting tab-separated result table.
    """
    df = pd.read_csv(opts['biogrid'], sep='\t')
    interact_df = df[['Official Symbol Interactor A',
                      'Official Symbol Interactor B']]
    interact_genes = interact_df.dropna().values.tolist()
    G = nx.Graph()
    G.add_edges_from(map(tuple, interact_genes))
    gene_betweeness = nx.betweenness_centrality(G)
    # dict() works for both networkx 1.x (plain dict) and 2.x (DegreeView).
    gene_degree = dict(G.degree())

    result = [[key, gene_betweeness[key], gene_degree[key]]
              for key in gene_degree]
    result = [['gene', 'gene_betweeness', 'gene_degree']] + result
    # csv on Python 3 requires text mode with newline=''; the original
    # opened the file in binary 'wb', which only worked on Python 2.
    with open(opts['output'], 'w', newline='') as handle:
        csv.writer(handle, delimiter='\t').writerows(result)
项目:BrainModulyzer    作者:sugeerth    | 项目源码 | 文件源码
def UpdateThresholdDegree(self):
        """Rebuild the graph at the current edge-threshold slider value and
        resize every Node item in the scene by the selected metric.

        Side effects: recomputes all centrality measures on self, emits
        ThresholdChange(True), and (unless colouring by correlation)
        pushes the current community level to the UI.
        """
        self.g =  self.Graph_data().DrawHighlightedGraph(self.EdgeSliderValue)

        # Degree Centrality for the nodes involved
        self.Centrality=nx.degree_centrality(self.g)
        self.Betweeness=nx.betweenness_centrality(self.g)
        self.ParticipationCoefficient = self.communityDetectionEngine.participation_coefficient(self.g,True)
        self.LoadCentrality = nx.load_centrality(self.g)
        self.ClosenessCentrality = nx.closeness_centrality(self.g)

        # Zero out NaN participation coefficients so the size/rank
        # arithmetic below cannot fail on them.
        for i in range(len(self.ParticipationCoefficient)):
            if (str(float(self.ParticipationCoefficient[i])).lower() == 'nan'):
                   self.ParticipationCoefficient[i] = 0

        i = 0
        """ Calculate rank and Zscore """
        # nodeSizeFactor names one of the metric attributes computed above;
        # eval() maps that name to the actual dict.
        MetrixDataStructure=eval('self.'+self.nodeSizeFactor)

        from collections import OrderedDict
        self.sortedValues = OrderedDict(sorted(MetrixDataStructure.items(), key=lambda x:x[1]))

        # NOTE(review): np.average/np.std over dict .values() relies on
        # Python 2 returning a list; Python 3 would need list(...) -- confirm
        # the target interpreter.
        self.average = np.average(self.sortedValues.values())
        self.std = np.std(self.sortedValues.values())

        # i indexes nodes in scene order; assumed to line up with the metric
        # dict's integer keys -- TODO confirm.
        for item in self.scene().items():
            if isinstance(item, Node):
                Size = eval('self.'+self.nodeSizeFactor+'[i]')
                rank, Zscore = self.calculateRankAndZscore(i)
                item.setNodeSize(Size,self.nodeSizeFactor,rank,Zscore)
                i = i + 1

        self.ThresholdChange.emit(True)
        if not(self.ColorNodesBasedOnCorrelation):
            self.Ui.communityLevelLineEdit.setText(str(self.level))
            self.DendoGramDepth.emit(self.level)

        self.Refresh()
项目:ocean    作者:worldoss    | 项目源码 | 文件源码
def central_list(E):
    """Print five centrality measures for every node of directed graph E.

    For each node, a line of tab-separated in-degree, out-degree,
    closeness, betweenness and eigenvector centrality scores is printed.
    """
    centralities = [
        nx.in_degree_centrality(E),
        nx.out_degree_centrality(E),
        nx.closeness_centrality(E),
        nx.betweenness_centrality(E),
        nx.eigenvector_centrality(E),
    ]

    # E.nodes() works on both networkx 1.x and 2.x, unlike the removed
    # nodes_iter() the original called.
    for node in E.nodes():
        measures = "\t".join(str(c[node]) for c in centralities)
        print("%s: %s" % (node, measures))
项目:PhD    作者:wutaoadeny    | 项目源码 | 文件源码
def Between_Centrality(G):
    """Return the betweenness-centrality mapping (node -> score) for G."""
    return nx.betweenness_centrality(G)
项目:PhD    作者:wutaoadeny    | 项目源码 | 文件源码
def Between_Centrality(G):
    """Return the betweenness-centrality mapping (node -> score) for G."""
    return nx.betweenness_centrality(G)
项目:KDDCUP2016    作者:hugochan    | 项目源码 | 文件源码
def top_centrality():
    """Build a citation digraph from the database and print the first 100
    (node, betweenness) pairs.

    Pulls up to 1,000,000 (citing, cited) edges from the ``graph`` table.
    """
    citations = db.select(["citing", "cited"], table="graph", limit=1000000)
    print(len(citations))

    graph = nx.DiGraph()
    for citing, cited in progress(citations, 10000):
        graph.add_edge(int(citing), int(cited))

    print(graph.number_of_nodes())
    centrality = nx.betweenness_centrality(graph)
    # list(...) is required on Python 3, where dict .items() returns a
    # non-sliceable view (the original py2 code sliced the list directly).
    print(list(centrality.items())[:100])
项目:anomalous-vertices-detection    作者:Kagandi    | 项目源码 | 文件源码
def betweenness_centrality(self):
        """Compute betweenness centrality for every vertex of the wrapped graph.

        The graph and the edge-weight attribute name come from the instance
        (``self._graph`` and ``self._weight_field``).

        Returns
        -------
        dict
            Mapping of node -> betweenness-centrality score.

        Examples
        --------
        >>>
        """
        return nx.betweenness_centrality(self._graph, weight=self._weight_field)
项目:Quadflor    作者:quadflor    | 项目源码 | 文件源码
def __init__(self, method='degree', analyzer=NltkNormalizer().split_and_normalize):
        """Configure the graph-based term scorer.

        Parameters
        ----------
        method : str
            One of 'degree', 'betweenness', 'pagerank', 'hits', 'closeness',
            'katz'; selects the node-scoring function. Any other value raises
            KeyError here.
        analyzer : callable
            Tokenizer mapping a document to a list of terms.
            NOTE(review): the default is evaluated once at definition time,
            so all instances share a single NltkNormalizer -- confirm that
            is intended.
        """
        self.analyze = analyzer
        self.method = method
        # Methods that must run on the directed graph rather than an
        # undirected projection.
        self.methods_on_digraph = {'hits', 'pagerank', 'katz'}
        # Dispatch table resolved once here; unknown method -> KeyError.
        self._get_scores = {'degree': nx.degree, 'betweenness': nx.betweenness_centrality,
                            'pagerank': nx.pagerank_scipy, 'hits': self._hits, 'closeness': nx.closeness_centrality,
                            'katz': nx.katz_centrality}[method]
        # Add a new value when a new vocabulary item is seen
        self.vocabulary = defaultdict()
        self.vocabulary.default_factory = self.vocabulary.__len__
项目:sceneTransitionNetMovieClassification    作者:daltonsi    | 项目源码 | 文件源码
def graph_info(g):
    """Summarise a directed graph: degrees, components and centralities.

    Returns a dict of human-readable metric names to values. Each expensive
    centrality is computed exactly once and reused; the original recomputed
    closeness/betweenness once per node inside comprehensions (quadratic
    work with no effect on the result).

    Parameters
    ----------
    g : networkx.DiGraph
        The graph to summarise.
    """
    result = {}
    components = list(nx.strongly_connected_component_subgraphs(g))
    in_degrees = g.in_degree()
    out_degrees = g.out_degree()
    highest_in_degree_node = sorted(in_degrees, key=lambda x: in_degrees[x], reverse=True)[0]
    highest_out_degree_node = sorted(out_degrees, key=lambda x: out_degrees[x], reverse=True)[0]

    result['highest in_degree node'] = highest_in_degree_node
    result['highest out_degree_node'] = highest_out_degree_node

    # 'numnber' misspelling kept deliberately: the keys are the function's
    # observable interface and downstream consumers may look them up verbatim.
    result['numnber of components'] = len(components)
    result['number of nodes'] = g.number_of_nodes()
    result['number of edges'] = g.number_of_edges()

    # Degree centrality
    in_degree_centrality = nx.in_degree_centrality(g)
    out_degree_centrality = nx.out_degree_centrality(g)
    result['sorted in_degree centrality'] = sorted(in_degree_centrality.items(), key=lambda x: x[1], reverse=True)
    result['sorted out_degree centrality'] = sorted(out_degree_centrality.items(), key=lambda x: x[1], reverse=True)

    closeness = nx.closeness_centrality(g)
    result['closeness_centrality'] = sorted(closeness.items(), key=lambda x: x[1], reverse=True)
    result['highest in_degree node closeness'] = closeness[highest_in_degree_node]
    result['highest out_degree node closeness'] = closeness[highest_out_degree_node]

    betweenness = nx.betweenness_centrality(g)
    result['betweenness centrality'] = sorted(betweenness.items(), key=lambda x: x[1], reverse=True)
    result['highest in_degree node betweenness'] = betweenness[highest_in_degree_node]
    # BUG FIX: the original stored this value under the in_degree key,
    # silently overwriting the line above.
    result['highest out_degree node betweenness'] = betweenness[highest_out_degree_node]

    largest_component = sorted(components, key=lambda x: x.number_of_nodes(), reverse=True)[0]

    result['largest strongly component percent'] = largest_component.number_of_nodes() / float(g.number_of_nodes())
    result['largest strongly component diameter'] = nx.diameter(largest_component)
    result['largest strongly component average path length'] = nx.average_shortest_path_length(largest_component)
    # dict() makes .values() work on both networkx 1.x and 2.x degree APIs.
    result['average_degree (undireceted)'] = sum(dict(g.degree()).values()) / float(g.number_of_nodes())
    result['avg_cluster_coefficient (transitivity)'] = nx.transitivity(g)
    return result
项目:WNTR    作者:USEPA    | 项目源码 | 文件源码
def central_point_dominance(self):
        """
        Compute central point dominance.

        The betweenness centralities of the undirected projection are
        averaged as deviations from the maximum value.

        Returns
        -------
        cpd : float
            Central point dominance
        """
        bc = nx.betweenness_centrality(self.to_undirected())
        scores = np.array(list(bc.values()))
        # Mean gap between the most central node and every node.
        return (scores.max() - scores).sum() / (len(scores) - 1)
项目:pybel-tools    作者:pybel    | 项目源码 | 文件源码
def count_top_centrality(graph, number=30):
    """Return the *number* nodes with the highest betweenness centrality.

    Gives back a dict mapping node -> betweenness score for the top entries.
    """
    scores = Counter(nx.betweenness_centrality(graph))
    return dict(scores.most_common(number))
项目:pybel-tools    作者:pybel    | 项目源码 | 文件源码
def calculate_betweenness_centality(graph, k=CENTRALITY_SAMPLES):
    """Calculates the betweenness centrality over nodes in the graph. Tries to do it with a certain number of samples,
    but then tries a complete approach if it fails.

    :param pybel.BELGraph graph: A BEL graph
    :param int k: The number of samples to use
    :rtype: collections.Counter[tuple,float]
    """
    try:
        return Counter(nx.betweenness_centrality(graph, k=k))
    except Exception:
        # Sampled computation can fail (e.g. k exceeding the node count);
        # fall back to the exact, unsampled centrality. The original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        return Counter(nx.betweenness_centrality(graph))
项目:BrainModulyzer    作者:sugeerth    | 项目源码 | 文件源码
def changeLayout(self,Layout='sfdp'):
        """Recompute node positions with the named graphviz layout, refresh
        all node metrics, and reposition/resize every Node and edge in
        the scene.

        NOTE(review): Layout.encode('ascii','ignore').replace(' ','')
        assumes Python 2 str semantics; on Python 3 the bytes/str mix in
        .replace would raise -- confirm the target interpreter.
        """
        Layout = (Layout.encode('ascii','ignore')).replace(' ','')
        self.g =  self.Graph_data().DrawHighlightedGraph(self.EdgeSliderValue)

        # asking community detection Engine to compute the Layout
        self.pos,Factor = self.communityDetectionEngine.communityLayoutCalculation(Layout,self.g)

        # Degree Centrality for the nodes involved
        self.Centrality=nx.degree_centrality(self.g)
        self.Betweeness=nx.betweenness_centrality(self.g)
        self.LoadCentrality = nx.load_centrality(self.g)
        self.ParticipationCoefficient = self.communityDetectionEngine.participation_coefficient(self.g,True)
        self.ClosenessCentrality = nx.closeness_centrality(self.g)

        # Zero out NaN participation coefficients so the size/rank
        # arithmetic below cannot fail on them.
        for i in range(len(self.ParticipationCoefficient)):
            if (str(float(self.ParticipationCoefficient[i])).lower() == 'nan'):
                   self.ParticipationCoefficient[i] = 0
        i = 0

        """ Calculate rank and Zscore """
        # nodeSizeFactor names one of the metric attributes computed above;
        # eval() maps that name to the actual dict.
        MetrixDataStructure=eval('self.'+self.nodeSizeFactor)
        from collections import OrderedDict

        self.sortedValues = OrderedDict(sorted(MetrixDataStructure.items(), key=lambda x:x[1]))
        # NOTE(review): np.average/np.std over dict .values() relies on
        # Python 2 returning a list; Python 3 would need list(...).
        self.average = np.average(self.sortedValues.values())
        self.std = np.std(self.sortedValues.values())

        # Move and resize each Node; i indexes nodes in scene order,
        # assumed to match self.pos and the metric dict keys -- TODO confirm.
        for item in self.scene().items():
            if isinstance(item, Node):
                x,y=self.pos[i]
                item.setPos(QtCore.QPointF(x,y)*Factor)
                Size = eval('self.'+self.nodeSizeFactor+'[i]')
                rank, Zscore = self.calculateRankAndZscore(i)
                item.setNodeSize(Size,self.nodeSizeFactor,rank,Zscore)
                i = i + 1

        for edge in self.edges:
            edge().adjust()

        self.Refresh()

        # Unless positions are pinned, refit the scene rectangle to the
        # new layout before rescaling the view.
        if not(self.PositionPreserve):
            self.Scene_to_be_updated.setSceneRect(self.Scene_to_be_updated.itemsBoundingRect())
            self.setScene(self.Scene_to_be_updated)

        self.fitInView(self.Scene_to_be_updated.itemsBoundingRect(),QtCore.Qt.KeepAspectRatio)
        self.Scene_to_be_updated.update()