Python networkx module: connected_components() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use networkx.connected_components().
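
A minimal orientation sketch before the project examples (the graph and names here are ours, not from any of the projects below): on an undirected graph, networkx.connected_components() yields one set of nodes per component, and callers typically wrap it in sorted() to get the giant component first.

import networkx as nx

G = nx.Graph()
G.add_edges_from([(1, 2), (2, 3), (4, 5)])
G.add_node(6)  # an isolated node forms its own component

# one set of nodes per component, largest first
components = sorted(nx.connected_components(G), key=len, reverse=True)
print(components)          # [{1, 2, 3}, {4, 5}, {6}]
print(len(components[0]))  # size of the giant component: 3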

Project: gym-extensions | Author: Breakend
def merge_rectangles_into_obstacles(self, centers, widths, heights, epsilon):
        """Merges rectangles defined by centers, widths, heights. Two rectangles
        with distance < epsilon are considered part of the same object."""

        G = nx.Graph()
        obstacles = {i: Obstacle(centers[i, :], widths[i, 0], heights[i, 0]) for i in range(len(centers))}
        G.add_nodes_from(obstacles.keys())

        for i in obstacles:
            for j in obstacles:
                if i != j and obstacles[i].distance_to_obstacle(obstacles[j]) < epsilon:
                    G.add_edge(i,j)

        merged_obstacles = {}
        conn_components = nx.connected_components(G)
        for cc in conn_components:
            cc = list(cc)
            new_obs = obstacles[cc[0]]
            for i in range(1, len(cc)):
                new_obs.merge(obstacles[cc[i]])

            merged_obstacles[cc[0]] = new_obs

        return merged_obstacles
Project: community-networks-monitoring-tools | Author: netCommonsEU
def getOwnerRobustness(self, graph):
        """Compute the "owner robustness"."""

        ownerNodes, nodeOwner = self.get_owner_distribution(graph)
        print("# owner".rjust(long_align_space), ",",
              "main C. size".rjust(long_align_space), ",",
              "number of components".rjust(long_align_space))
        for owner, nodes in sorted(ownerNodes.items(),
                                   key=lambda x: -len(x[1])):
            purged_graph = nx.Graph(graph)
            for n in nodes:
                purged_graph.remove_node(n)
            comp_list = list(nx.connected_components(purged_graph))
            main_comp = sorted(comp_list, key=len, reverse=True)[0]
            print(owner.rjust(long_align_space), ",",
                  str(len(main_comp)).rjust(long_align_space), ",",
                  str(len(comp_list)).rjust(long_align_space))
        print("")
        print("")

    #  ################# helper functions
    # These functions are needed to handle data structures from
    # other sources of data. You can use a database and dump the
    # data in XML from a db. You probably do not need these functions.
Project: MDAnalysis-with-Dask | Author: Becksteinlab
def Leaflet_finder(traj, i, j, ii, jj, len_chunks, cutoff):
    g1 = np.load(os.path.abspath(os.path.normpath(os.path.join(os.getcwd(),traj))), mmap_mode='r')[i:i+len_chunks]
    g2 = np.load(os.path.abspath(os.path.normpath(os.path.join(os.getcwd(),traj))), mmap_mode='r')[j:j+len_chunks]

    block = np.zeros((len(g1),len(g2)),dtype=float)
    block[:,:] = cdist(g1, g2) <= cutoff
    adj_list = np.where(block[:,:] == True)        
    adj_list = np.vstack(adj_list)

    adj_list[0] = adj_list[0]+ii*len_chunks
    adj_list[1] = adj_list[1]+jj*len_chunks

    if adj_list.shape[1] == 0:
        adj_list=np.zeros((2,1))

    graph = nx.Graph()
    edges = [(adj_list[0,k],adj_list[1,k]) for k in range(0,adj_list.shape[1])]
    graph.add_edges_from(edges)
    leaflet = sorted(nx.connected_components(graph), key=len, reverse=True)
    # keep only real components (more than one atom), as in the sibling version below
    l_connected = [cc for cc in leaflet if len(cc) > 1]

    return l_connected
Project: MDAnalysis-with-Dask | Author: Becksteinlab
def Leaflet_finder(block, traj, cutoff, len_atom, len_chunks, block_id=None):
    id_0 = block_id[0]
    id_1 = block_id[1]

    block[:,:] = cdist(np.load(traj, mmap_mode='r')[id_0*len_chunks:(id_0+1)*len_chunks], np.load(traj, mmap_mode='r')[id_1*len_chunks:(id_1+1)*len_chunks]) <= cutoff 
    adj_list = np.where(block[:,:] == True)        
    adj_list = np.vstack(adj_list)

    adj_list[0] = adj_list[0]+id_0*len_chunks
    adj_list[1] = adj_list[1]+id_1*len_chunks

    if adj_list.shape[1] == 0:
        adj_list=np.zeros((2,1))

    graph = nx.Graph()
    edges = [(adj_list[0,k],adj_list[1,k]) for k in range(0,adj_list.shape[1])]
    graph.add_edges_from(edges)
    l = np.array({i: item for i, item in enumerate(sorted(nx.connected_components(graph), key=len))}, dtype=object).reshape(1,1)

    return l
Project: PhD | Author: wutaoadeny
def ER_Generateor(N=1000, M=3000):
    G = nx.gnm_random_graph(N, M)
    # keep only the giant component of the network
    if not nx.is_connected(G):
        components = sorted(nx.connected_components(G), key=len, reverse=True)
        print("Component number of the generated network:", len(components))
        for component in components[1:]:
            for node in component:
                G.remove_node(node)
        print(nx.is_connected(G))

    return G

#************************************************************************
Project: PhD | Author: wutaoadeny
def SF_Generateor(N=1000, m=3):
    G = nx.barabasi_albert_graph(N, m)
    # keep only the giant component of the network
    if not nx.is_connected(G):
        components = sorted(nx.connected_components(G), key=len, reverse=True)
        print("Component number of the generated network:", len(components))
        for component in components[1:]:
            for node in component:
                G.remove_node(node)
        print(nx.is_connected(G))

    return G

#************************************************************************
Project: PhD | Author: wutaoadeny
def Optimal_Percolation_Simultaneous_Attack(G, Centrality):
    Gn = G.copy()
    Ranking = Ranking_methods.Nodes_Ranking(Gn, Centrality)
    Ranking = sorted(Ranking.items(), key=lambda d: d[1], reverse=True)

    Giant_Component_Size_List = []
    Component_Num_List = []
    for nid in Ranking:
        G.remove_node(nid[0])
        # track the greatest component of the network
        Components = sorted(nx.connected_components(G), key=len, reverse=True)
        if len(Components) >= 1:
            Giant_Component_Size = len(Components[0])
            if Giant_Component_Size > 1:
                Giant_Component_Size_List.append(Giant_Component_Size)
                Component_Num_List.append(len(Components))
    return Giant_Component_Size_List, Component_Num_List

#************************************************************************
Project: PySCIPOpt | Author: SCIP-Interfaces
def findSubtours(self, checkonly, sol):
        EPS = 1.e-6
        edges = []
        x = self.model.data
        for (i, j) in x:
            if self.model.getSolVal(sol, x[i, j]) > EPS:
                edges.append((i,j))

        G = networkx.Graph()
        G.add_edges_from(edges)
        Components = list(networkx.connected_components(G))

        if len(Components) == 1:
            return False
        elif checkonly:
            return True

        for S in Components:
            self.model.addCons(quicksum(x[i, j] for i in S for j in S if j > i) <= len(S) - 1)
            print("cut: len(%s) <= %s" % (S, len(S) - 1))

        return True
Project: pyhiro | Author: wanweiwei07
def clusters(points, radius):
    '''
    Find clusters of points which have neighbours closer than radius

    Arguments
    ---------
    points: (n, d) points (of dimension d)
    radius: max distance between points in a cluster

    Returns
    ---------
    groups: (m) sequence of indices for points
    '''
    tree   = KDTree(points)
    pairs  = tree.query_pairs(radius)
    graph  = from_edgelist(pairs)
    groups = list(connected_components(graph))
    return groups
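
A usage sketch for the function above (the points and radius are invented; it assumes the module imports KDTree from scipy.spatial and from_edgelist/connected_components from networkx, as the bare names in the function suggest):

import numpy as np
from networkx import from_edgelist, connected_components
from scipy.spatial import KDTree

points = np.random.random((100, 2))  # 100 random 2D points
groups = clusters(points, radius=0.05)
# each group is a set of point indices; points with no neighbour
# within the radius never enter the graph and are not returned
print(len(groups), "clusters, sizes:", [len(g) for g in groups])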
Project: pyhiro | Author: wanweiwei07
def smoothed(mesh, angle):
    '''
    Return a non-watertight version of the mesh which will
    render nicely with smooth shading.

    Arguments
    ---------
    mesh:  Trimesh object
    angle: float, angle in radians, adjacent faces which have normals
           below this angle will be smoothed.

    Returns
    ---------
    smooth: Trimesh object
    '''
    adjacency = adjacency_angle(mesh, angle)
    graph = nx.from_edgelist(adjacency)
    graph.add_nodes_from(np.arange(len(mesh.faces)))
    smooth = mesh.submesh(nx.connected_components(graph),
                          only_watertight = False,
                          append = True)
    return smooth
Project: pyMABED | Author: AdrienGuille
def merge_redundant_events(self, events):
        # compute the connected components in the redundancy graph
        components = list(nx.connected_components(self.redundancy_graph))
        final_events = []

        # merge redundant events
        for event in events:
            main_word = event[2]
            main_term = main_word
            descriptions = []
            for component in components:
                if main_word in component:
                    main_term = ', '.join(component)
                    for node in component:
                        descriptions.append(self.redundancy_graph.nodes[node]['description'])
                    break
            if len(descriptions) == 0:
                related_words = event[3]
            else:
                related_words = self.merge_related_words(main_term, descriptions)
            final_event = (event[0], event[1], main_term, related_words, event[4])
            final_events.append(final_event)
        return final_events
Project: MultifurcationFeasibility | Author: Mathagoris
def get_conflicts(self):
        """Find irreconcilable connected components of leg."""
        conflicts = set()  # connected components with conflict
        for cc in nx.connected_components(self.leg):
            # key = species, val = set of loci in species for this cc
            loci_dct = collections.defaultdict(set)

            for label in cc:
                species, locus = label
                loci_dct[species].add(locus)

            for sp, loci in loci_dct.items():
                # conflict if a species has more than one locus in this cc
                if len(loci) >= 2:
                    conflicts.add(tuple(cc))
                    break
        return conflicts
Project: MultifurcationFeasibility | Author: Mathagoris
def get_conflicts(leg):
    """Find irreconcilable connected components of leg."""
    conflicts = set() # connected components with conflict
    for cc in nx.connected_components(leg):
        # key = species, val = set of loci in species for this cc
        loci_dct = collections.defaultdict(set)

        for label in cc:
            species, locus = label
            loci_dct[species].add(locus)

        for sp, loci in loci_dct.items():
            # conflict if a species has more than one locus in this cc
            if len(loci) >= 2:
                conflicts.add(tuple(cc))
                break
    return conflicts
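
A toy run sketch of the module-level variant (the graph is invented): nodes are (species, locus) labels, and a component is conflicted when one species contributes two or more loci to it.

import networkx as nx

leg = nx.Graph()
leg.add_edge(('A', 1), ('B', 1))
leg.add_edge(('B', 1), ('A', 2))  # species 'A' appears with loci 1 and 2
leg.add_edge(('C', 1), ('D', 1))  # no species repeated here

print(get_conflicts(leg))
# one conflicted component, containing ('A', 1), ('B', 1) and ('A', 2)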
Project: grocsvs | Author: grocsvs
def visualize_frags(outdir, graphs, options):
    from rpy2.robjects import r

    utilities.ensure_dir(outdir)

    for i, graph in enumerate(graphs):
        r.pdf(os.path.join(outdir, "fragments.cluster_{}.pdf".format(i)))

        for component in networkx.connected_components(graph):
            subgraph = graph.subgraph(component)

            ends = [node for node, degree in subgraph.degree() if degree == 1]
            breakends = next(networkx.shortest_simple_paths(subgraph, ends[0], ends[1]))
            breakends = breakends[:-1:2] + breakends[-1:]

            for sample, dataset in sorted(options.iter_10xdatasets()):
                plot_frags(breakends, options, sample, dataset)
        # plot_frags(breakpoints, options, sample, dataset)
        r["dev.off"]()
Project: openbadge-analysis | Author: HumanDynamics
def extract_groups(m2m):
    """Extracts a list of groups from a social network varying through time.

    Groups are defined as connected components of the social graph at a given
    time bin.

    Parameters
    ----------
    m2m : pd.DataFrame
        The social network, for instance, member-to-member bluetooth proximity
        data.  It must have the following columns: 'datetime', 'member1', and
        'member2'.

    Returns
    -------
    pd.DataFrame :
        The groups, as a sets of members with datetime.
    """
    groups = m2m.groupby('datetime').apply(
        lambda df:
        pd.Series([frozenset(c) for c in nx.connected_components(nx.from_pandas_dataframe(df.reset_index(), 'member1', 'member2'))])
    )
    groups.name = 'members'

    return groups.reset_index()[['datetime', 'members']]
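
A toy invocation sketch (the frame is invented; note that networkx 2.x renamed from_pandas_dataframe to from_pandas_edgelist, so the call above is tied to networkx 1.x):

import pandas as pd

m2m = pd.DataFrame({
    'datetime': pd.to_datetime(['2017-01-01 10:00'] * 3),
    'member1': ['alice', 'bob', 'dave'],
    'member2': ['bob', 'carol', 'erin'],
})
groups = extract_groups(m2m)
print(groups)
# one row per group per time bin:
# frozenset({'alice', 'bob', 'carol'}) and frozenset({'dave', 'erin'})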
Project: saapy | Author: ashapochka
def group_similar_actors(self):
        similar_actor_groups = [list(g) for g in
                                nx.connected_components(self.actor_graph)]
        return similar_actor_groups
Project: saapy | Author: ashapochka
def _identify_actors(self, edges, nodes):
        author_graph = nx.Graph()
        author_graph.add_nodes_from(nodes)
        author_graph.add_edges_from(edges)
        same_actor_groups = [list(g) for g in
                             nx.connected_components(author_graph)]
        actors = [dict(primary_id=int(g[0]), group_ids=[int(gid) for gid in g])
                  for g in same_actor_groups]
        return actors
Project: MDAnalysis-with-Dask | Author: Becksteinlab
def Leaflet_finder(traj, i, j, len_atom, len_chunks, cutoff):
    atom = np.load(traj)
    g1 = atom[i:i+len_chunks]
    g2 = atom[j:j+len_chunks]
    block = np.zeros((len(g1),len(g2)),dtype=float)
    block[:,:] = cdist(g1, g2) <= cutoff
    S = scipy.sparse.dok_matrix((len_atom, len_atom))
    S[i:i+len_chunks, j:j+len_chunks] = block
    leaflet = sorted(nx.connected_components(nx.Graph(S>0)), key=len, reverse=True)
    # keep only real components (more than one atom)
    l_connected = [cc for cc in leaflet if len(cc) > 1]

    return l_connected
Project: dnflow | Author: DocNow
def run(self):
        date_path = self.search['date_path']
        files = sorted(os.listdir('data/%s/media' % date_path))
        hashes = {}
        matches = []
        g = nx.Graph()
        update_block_size = get_block_size(len(files), 5)
        for i in range(len(files)):
            f = files[i]
            fn = 'data/%s/media/%s' % (date_path, f)
            ahash = imagehash.average_hash(Image.open(fn))
            dhash = imagehash.dhash(Image.open(fn))
            phash = imagehash.phash(Image.open(fn))
            hashes[f] = {'ahash': ahash, 'dhash': dhash, 'phash': phash}
            for j in range(0, i):
                f2name = files[j]
                f2 = hashes[f2name]
                sumhash = sum([ahash - f2['ahash'],
                               dhash - f2['dhash'],
                               phash - f2['phash']])
                # FIXME: 40 is a hard-coded arbitrary (eyeballed) threshold
                if sumhash <= 40:
                    matches.append([f, files[j],
                                    ahash - f2['ahash'],
                                    dhash - f2['dhash'],
                                    phash - f2['phash'],
                                    sumhash])
                    g.add_edge(f, f2name)
            if i % update_block_size == 0:
                self.update_job(
                    date_path=self.search['date_path'],
                    status="STARTED: %s - %s/%s" %
                           (self.task_family, i, len(files))
                )
        with self.output().open('w') as fp_graph:
            components = list(nx.connected_components(g))
            # Note: sets are not JSON serializable
            d = []
            for s in components:
                d.append(list(s))
            json.dump(d, fp_graph, indent=2)
Project: PhD | Author: wutaoadeny
def Optimal_Percolation_Sequence_Attack(G, Centrality, r=0.025):
    print("Optimal_Percolation_Sequence_Attack")
    Step = int(r * G.number_of_nodes())
    print(Step)
    Gn = G.copy()
    Ranking = Ranking_methods.Nodes_Ranking(Gn, Centrality)
    Ranking = sorted(Ranking.items(), key=lambda d: d[1], reverse=True)
    G.remove_node(Ranking[0][0])

    # track the size of the greatest component of the network
    Giant_Component_Size_List = []
    Components = sorted(nx.connected_components(G), key=len, reverse=True)
    Giant_Component_Size = len(Components[0])
    Giant_Component_Size_List.append(Giant_Component_Size)

    # Ranking is a sorted list at this point, so test it for emptiness
    while Giant_Component_Size_List[-1] > 2 and Ranking:
        Gn = G.copy()
        Ranking = Ranking_methods.Nodes_Ranking(Gn, Centrality)
        Ranking = sorted(Ranking.items(), key=lambda d: d[1], reverse=True)
        if len(Ranking) > Step:
            for i in range(0, Step):
                G.remove_node(Ranking[i][0])
        Components = sorted(nx.connected_components(G), key=len, reverse=True)
        Giant_Component_Size = len(Components[0])
        Giant_Component_Size_List.append(Giant_Component_Size)

    return Giant_Component_Size_List
#===============================================================================================
Project: PySCIPOpt | Author: SCIP-Interfaces
def addCuts(self, checkonly):
        """add cuts if necessary and return whether model is feasible"""
        cutsadded = False
        edges = []
        x = self.model.data
        for (i, j) in x:
            if self.model.getVal(x[i, j]) > .5:
                if i != V[0] and j != V[0]:
                    edges.append((i, j))
        G = networkx.Graph()
        G.add_edges_from(edges)
        Components = list(networkx.connected_components(G))
        for S in Components:
            S_card = len(S)
            q_sum = sum(q[i] for i in S)
            NS = int(math.ceil(float(q_sum) / Q))
            S_edges = [(i, j) for i in S for j in S if i < j and (i, j) in edges]
            if S_card >= 3 and (len(S_edges) >= S_card or NS > 1):
                cutsadded = True
                if checkonly:
                    break
                else:
                    self.model.addCons(quicksum(x[i, j] for i in S for j in S if j > i) <= S_card - NS)
                    print("adding cut for", S_edges)

        return cutsadded
Project: PyPSA | Author: PyPSA
def busmap_by_linemask(network, mask):
    mask = network.lines.loc[:,['bus0', 'bus1']].assign(mask=mask).set_index(['bus0','bus1'])['mask']
    G = nx.OrderedGraph()
    G.add_nodes_from(network.buses.index)
    G.add_edges_from(mask.index[mask])
    return pd.Series(OrderedDict((n, str(i))
                                 for i, g in enumerate(nx.connected_components(G))
                                 for n in g),
                     name='name')
Project: visa_free | Author: BBischof
def communities(self, nCommunities, weight=None):
        """
        Compute communities.

        Parameters
        ----------
        nCommunities - number of communities to be returned.
            This is added to simplify the process; the original GN algorithm doesn't
            need a predetermined number of communities. Other measures, such as a
            threshold on betweenness centrality, could be used instead.

        weight (string) - If None, all edge weights are considered equal.
            Otherwise holds the name of the edge attribute used as weight.

        Returns
        --------
        The connected components of the split graph, where each component is a
        set of the nodes in one community.
        """
        gr = self.g
        n = nx.number_connected_components(gr)
        components = nx.connected_components(gr)

        while (n < nCommunities):
            gr = self.communitySplits(gr, weight=weight)
            components = nx.connected_components(gr)
            n = nx.number_connected_components(gr)
            if gr.number_of_edges() == 0:
                break
        return components
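
The loop above depends on a communitySplits helper that is not part of this excerpt. A minimal sketch, assuming it performs one Girvan-Newman step, i.e. removes the edge or edges with the highest betweenness centrality (nx is networkx, as in the surrounding code):

def communitySplits(self, graph, weight=None):
    """One assumed Girvan-Newman step: drop the highest-betweenness edge(s)."""
    betweenness = nx.edge_betweenness_centrality(graph, weight=weight)
    if not betweenness:
        return graph
    max_b = max(betweenness.values())
    for edge, b in betweenness.items():
        if b == max_b:
            graph.remove_edge(*edge)
    return graph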
Project: pyhiro | Author: wanweiwei07
def split(mesh, only_watertight=True, adjacency=None):
    '''
    Given a mesh, split it up into a list of meshes based on face connectivity.
    If only_watertight is true, only return meshes where each face has
    exactly 3 adjacent faces.

    Arguments
    ----------
    mesh: Trimesh 
    only_watertight: if True, only return watertight components
    adjacency: (n,2) list of face adjacency to override using the plain
               adjacency calculated automatically. 

    Returns
    ----------
    meshes: list of Trimesh objects
    '''

    def split_nx():
        adjacency_graph = nx.from_edgelist(adjacency)
        components = nx.connected_components(adjacency_graph)
        result = mesh.submesh(components, only_watertight=only_watertight)
        return result

    def split_gt():
        g = GTGraph()
        g.add_edge_list(adjacency)
        component_labels = label_components(g, directed=False)[0].a
        components = group(component_labels)
        result = mesh.submesh(components, only_watertight=only_watertight)
        return result

    if adjacency is None:
        adjacency = mesh.face_adjacency

    if _has_gt: 
        return split_gt()
    else:       
        return split_nx()
Project: SyConn | Author: StructuralNeurobiologyLab
def make_merge_list(hdf5names, stitch_list, max_labels):
    """
    Creates a merge list from a stitch list by mapping all connected ids to
    one id

    Parameters
    ----------
    hdf5names: list of str
        List of names/ labels to be extracted and processed from the prediction
        file
    stitch_list: dictionary
        Contains pairs of overlapping component ids for each hdf5name
    max_labels: dictionary
        Contains the number of different component ids for each hdf5name

    Returns
    -------
    merge_dict: dictionary
        Maps each component id to its representative id, per hdf5name
    merge_list_dict: dictionary
        The same mapping as an array indexed by component id, per hdf5name
    """

    merge_dict = {}
    merge_list_dict = {}
    for hdf5_name in hdf5names:
        this_stitch_list = stitch_list[hdf5_name]
        max_label = max_labels[hdf5_name]
        graph = nx.from_edgelist(this_stitch_list)
        cc = nx.connected_components(graph)
        merge_dict[hdf5_name] = {}
        merge_list_dict[hdf5_name] = np.arange(max_label + 1)
        for this_cc in cc:
            this_cc = list(this_cc)
            for node_id in this_cc:
                merge_dict[hdf5_name][node_id] = this_cc[0]
                merge_list_dict[hdf5_name][node_id] = this_cc[0]

    return merge_dict, merge_list_dict
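
A toy invocation sketch (ids and the hdf5 name are invented): pairs (1, 2) and (2, 3) chain into one component, (5, 6) into another, and every id maps to a representative of its component (the first element of the component's set, so not necessarily the smallest).

import numpy as np
import networkx as nx

stitch_list = {'mito': [(1, 2), (2, 3), (5, 6)]}
max_labels = {'mito': 6}

merge_dict, merge_list_dict = make_merge_list(['mito'], stitch_list, max_labels)
print(merge_dict['mito'])       # e.g. {1: 1, 2: 1, 3: 1, 5: 5, 6: 5}
print(merge_list_dict['mito'])  # e.g. [0 1 1 1 4 5 5]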
Project: anomalous-vertices-detection | Author: Kagandi
def connected_components(self):
        """Return the connected components of the underlying graph.

        Returns
        -------
        A generator of sets, one set of node ids per connected component.
        """
        return nx.connected_components(self._graph)
Project: MultifurcationFeasibility | Author: Mathagoris
def draw_leg(self):
        # nx.draw(self.LEG)
        print("Connected Components of LEG:\n" + str(list(nx.connected_components(self.leg))))
Project: MultifurcationFeasibility | Author: Mathagoris
def is_feasible(self):
        for cc in nx.connected_components(self.leg):
            loci_dct = collections.defaultdict(set)
            for label in cc:
                species, locus = label
                loci_dct[species].add(locus)
            for loci in loci_dct.values():
                if len(loci) >= 2:
                    return False
        return True
Project: MultifurcationFeasibility | Author: Mathagoris
def is_feasible(self):
        for cc in nx.connected_components(self.LEG):
            cc = list(cc)
            for i in range(0, len(cc)):
                for j in range(i + 1, len(cc)):
                    if cc[i].split('_')[0] == cc[j].split('_')[0]:
                        return False
        return True
Project: grocsvs | Author: grocsvs
def get_subgraphs(graph):
    subgraphs = []
    for subgraph in networkx.connected_components(graph):
        if len(subgraph) > 0:
            subgraphs.append(graph.subgraph(subgraph))
    return subgraphs
Project: cluster_paraphrases | Author: acocos
def sem_clust(self, w2p, simsdict):
        ''' Baseline SEMCLUST method (dynamic thresholding), based on:

        Marianna Apidianaki, Emilia Verzeni, and Diana McCarthy. Semantic
        Clustering of Pivot Paraphrases. In LREC 2014.

        Builds a graph where nodes are words, and edges connect words that
        have a connection in <w2p>. Weights edges by the values given in
        <simsdict>.
        :param w2p: word -> {paraphrase: score} dictionary, used to decide which nodes to connect with edges
        :param simsdict: word -> {paraphrase: score} OR word -> vector, used for edge weights
        :return:
        '''
        self.reset_sense_clustering()
        wordlist = list(self.pp_dict.keys())

        oov = [w for w in wordlist if w not in w2p or w not in simsdict]
        if len(oov) > 0:
            sys.stderr.write('WARNING: Paraphrases %s are OOV. '
                             'Removing from ppset.\n' % str(oov))
            wordlist = list(set(wordlist) - set(oov))

        if len(wordlist) == 1:
            self.add_sense_cluster([wordlist[0]])
            return

        # Using cosine similarity of word-paraphrase vectors:
        if not isinstance(next(iter(simsdict.values())), dict):
            similarities = np.array([[1 - cosine(simsdict[i], simsdict[j])
                                      for j in wordlist] for i in wordlist])
        else:
            similarities = np.array([[(1-dict_cosine_dist(simsdict[i], simsdict[j]))
                                      for j in wordlist] for i in wordlist])

        gr = sem_clust.toGraph(similarities, wordlist, self.target_word, w2p)

        for c in nx.connected_components(gr):
            self.add_sense_cluster(c)
Project: bioconda-utils | Author: bioconda
def dag(recipe_folder, config, packages="*", format='gml', hide_singletons=False):
    """
    Export the DAG of packages to a graph format file for visualization
    """
    dag, name2recipes = utils.get_dag(utils.get_recipes(recipe_folder, packages), config)
    if hide_singletons:
        for node in nx.nodes(dag):
            if dag.degree(node) == 0:
                dag.remove_node(node)
    if format == 'gml':
        nx.write_gml(dag, sys.stdout.buffer)
    elif format == 'dot':
        write_dot(dag, sys.stdout)
    elif format == 'txt':
        subdags = sorted(map(sorted, nx.connected_components(dag.to_undirected())))
        subdags = sorted(subdags, key=len, reverse=True)
        singletons = []
        for i, s in enumerate(subdags):
            if len(s) == 1:
                singletons += s
                continue
            print("# subdag {0}".format(i))
            subdag = dag.subgraph(s)
            recipes = [
                recipe for package in nx.topological_sort(subdag)
                for recipe in name2recipes[package]]
            print('\n'.join(recipes) + '\n')
        if not hide_singletons:
            print('# singletons')
            recipes = [recipe for package in singletons for recipe in
                       name2recipes[package]]
            print('\n'.join(recipes) + '\n')
Project: kevlar | Author: dib-lab
def test_graph_init():
    """Test graph initialization."""
    instream = kevlar.open(data_file('var1.reads.augfastq'), 'r')
    graph = kevlar.ReadGraph()
    graph.load(kevlar.parse_augmented_fastx(instream))
    graph.populate_edges(strict=True)

    # 10 reads in the file, but read16f has no valid connections due to error
    assert len(graph.nodes()) == 10

    # The given read shares its interesting k-mer and has compatible overlaps
    # with 6 other reads (read13f and read15f have errors).
    r23name = 'read23f start=67,mutations=0'
    assert len(graph[r23name]) == 6

    # Test the values of one of the edges.
    r35name = 'read35f start=25,mutations=0'
    assert graph[r23name][r35name]['offset'] == 42
    assert graph[r23name][r35name]['overlap'] == 58

    # Two connected components: the main cluster plus the isolated read16f
    assert len(list(connected_components(graph))) == 2
    assert len([p for p in graph.partitions()]) == 1

    r8name = 'read8f start=8,mutations=0'
    r37name = 'read37f start=9,mutations=0'
    assert graph[r37name][r8name]['offset'] == 1
    assert graph[r37name][r8name]['overlap'] == 99
    pair = OverlappingReadPair(
        tail=graph.get_record(r8name), head=graph.get_record(r37name),
        offset=1, overlap=99, sameorient=True, swapped=False
    )
    assert merge_pair(pair) == ('CACTGTCCTTACAGGTGGATAGTCGCTTTGTAATAAAAGAGTTAC'
                                'ACCCCGGTTTTTAGAAGTCTCGACTTTAAGGAAGTGGGCCTACGG'
                                'CGGAAGCCGTC')
Project: kevlar | Author: dib-lab
def partitions(self, dedup=True, minabund=None, maxabund=None,
                   abundfilt=False):
        """
        Retrieve all partitions (connected components) from this graph.

        The `minabund` and `maxabund` parameters are used at graph construction
        time to filter out k-mers whose abundance is too large or small. If
        `abundfilt` is true, the minimum abundance is also applied to the number
        of sequences (reads or contigs) in the partition.
        """
        for cc in sorted(networkx.connected_components(self), reverse=True,
                         # Sort first by number of reads, then by read names
                         key=lambda c: (len(c), sorted(c))):
            if len(cc) == 1 and list(cc)[0] in self.readnames:
                continue  # Skip unassembled input reads
            if dedup:
                partition = ReadGraph()
                readstream = [self.get_record(readid) for readid in cc]
                partition.load(readstream, minabund, maxabund, dedup=True)
                assert partition.number_of_nodes() > 0
                if abundfilt:
                    if minabund and partition.number_of_nodes() < minabund:
                        continue  # Skip partitions that are too small
                yield partition
            else:
                yield cc
Project: maple | Author: Zhengzi
def extract_inter_function_cfg():

    # loop over every segment
    # seg: the starting address of each segment

    # initialize a directed graph to store the cfg
    cfg = nx.DiGraph()

    for seg in Segments():  

        if SegName(seg) == ".text" or SegName(seg) == ".plt":

            functions = Functions(seg)      
            for func_ea in functions:   

                #It will add some isolated node into the graph
                cfg.add_node(func_ea)

                for ref in CodeRefsTo(func_ea, 1):              
                    calling_func = get_func(ref)
                    if not calling_func:
                        continue
                    calling_func_startEA = calling_func.startEA
                    cfg.add_edge(calling_func_startEA, func_ea)

            #for ref in CodeRefsFrom(func_ea, 1):
            #   print "  calling %s(0x%x)" % (GetFunctionName(ref), ref)
    nodes = cfg.nodes()
    for node in nodes:
        ns = list(cfg.successors(node))
        if not ns:
            print(hex(node))

    # note: cfg is directed, so nx.connected_components() would raise here;
    # use nx.weakly_connected_components(cfg) on a DiGraph
    #nx.draw(cfg)
    #plt.show()
    #plt.savefig("graph.png", dpi=1000)
    ''' 
    #testing
    print cfg.number_of_nodes()
    print cfg.number_of_edges()
    #print cfg.node()
    nodes = cfg.nodes()
    print
    for node in nodes:
        print "parent: "
        print hex(node)
        print "child: "
        ns = cfg.successors(node)
        for n in ns:
            print hex(n)
    #endtesting
    '''
    return cfg
Project: pyhiro | Author: wanweiwei07
def face_adjacency(faces, return_edges=False):
    '''
    Returns an (n,2) list of face indices.
    Each pair of faces in the list shares an edge, making them adjacent.


    Arguments
    ----------
    faces: (n, d) int, set of faces referencing vertices by index
    return_edges: bool, return the edges shared by adjacent faces

    Returns
    ---------
    adjacency: (m,2) int, indexes of faces that are adjacent

    if return_edges: 
         edges: (m,2) int, indexes of vertices which make up the 
                 edges shared by the adjacent faces

    Example
    ----------
    This is useful for lots of things, such as finding connected components:

    graph = nx.Graph()
    graph.add_edges_from(mesh.face_adjacency)
    groups = nx.connected_components(graph)
    '''

    # first generate the list of edges for the current faces
    # also return the index for which face the edge is from
    edges, edge_face_index = faces_to_edges(faces, return_index = True)
    edges.sort(axis=1)
    # this will return the indices for duplicate edges
    # every edge appears twice in a well constructed mesh
    # so for every row in edge_idx, edges[edge_idx[*][0]] == edges[edge_idx[*][1]]
    # in this call to group rows, we discard edges which don't occur twice
    edge_groups = group_rows(edges, require_count=2)

    if len(edge_groups) == 0:
        log.error('No adjacent faces detected! Did you merge vertices?')

    # the pairs of all adjacent faces
    # so for every row in face_idx, self.faces[face_idx[*][0]] and 
    # self.faces[face_idx[*][1]] will share an edge
    face_adjacency = edge_face_index[edge_groups]
    if return_edges:
        face_adjacency_edges = edges[edge_groups[:,0]]
        return face_adjacency, face_adjacency_edges
    return face_adjacency