Python networkx module: isolates() code examples

We extracted the following 8 code examples from open-source Python projects to illustrate how to use networkx.isolates().

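Before the project snippets, here is a minimal standalone sketch of the typical pattern, assuming networkx >= 2.0 where isolates() returns an iterator rather than a list (the toy graph below is made up for illustration):

import networkx as nx

G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2)])
G.add_node(3)                       # node 3 has no edges, so it is isolated

isolated = list(nx.isolates(G))     # materialize: isolates() is a generator in networkx >= 2.0
print(isolated)                     # [3]

G.remove_nodes_from(isolated)       # pass a list, not the live iterator,
                                    # because removal mutates the graph being iterated
print(sorted(G.nodes()))            # [0, 1, 2]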
Project: Christofides    Author: dsrahul30
def Euler_Tour(multigraph):
    """Uses Fleury's algorithm to find an Euler tour of the MultiGraph."""
    tour = []
    temp_graph = nx.MultiGraph()
    graph_nodes = list(multigraph.nodes())
    current_node = graph_nodes[0]
    tour.append(current_node)
    while nx.number_of_edges(multigraph) > 0:
        for edge in multigraph.edges(current_node):
            # Prefer an edge whose removal keeps the graph connected (i.e. not a bridge).
            temp_graph = copy.deepcopy(multigraph)
            temp_graph.remove_edge(edge[0], edge[1], key=None)
            if nx.is_connected(temp_graph):
                tour.append(edge[1])
                current_node = edge[1]
                multigraph.remove_edge(edge[0], edge[1], key=None)
                break
        else:
            # Every remaining edge from current_node is a bridge: take the last one and
            # drop any nodes it leaves isolated (isolates() is an iterator in networkx >= 2.0).
            tour.append(edge[1])
            current_node = edge[1]
            multigraph.remove_edge(edge[0], edge[1], key=None)
            multigraph.remove_nodes_from(list(nx.isolates(multigraph)))
    return tour
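A hypothetical call, assuming the input is an Eulerian MultiGraph (connected, every node of even degree); the function consumes the edges of the graph it is given, so a deepcopy is passed here:

import copy
import networkx as nx

G = nx.MultiGraph()
G.add_edges_from([(0, 1), (1, 2), (2, 0)])   # a triangle: every node has even degree
tour = Euler_Tour(copy.deepcopy(G))          # deepcopy, because Euler_Tour removes edges as it walks
print(tour)                                  # e.g. [0, 1, 2, 0]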
Project: sparse-digraph-generator    Author: papoudakis
def sdg(num_nodes, num_edges, epsilon1, epsilon2):
    gen_graph = nx.DiGraph()
    nodes = range(num_nodes)
    gen_graph.add_nodes_from(nodes)
    i = 0
    while i < num_edges:
        # choose the out node uniformly
        if random.random() < epsilon1 or len(gen_graph.edges()) < 1:
            out_node = random.choice(nodes)
        # choose the out node by preferential attachment on out-degree
        else:
            out_degrees = dict(gen_graph.out_degree())
            out_nodes = list(out_degrees.keys())
            out_degree = np.array(list(out_degrees.values()), dtype=float)
            # the out-degrees sum to i, the number of edges added so far
            out_node = np.random.choice(out_nodes, p=out_degree / i)
        # choose the in node uniformly
        if random.random() < epsilon2 or len(gen_graph.edges()) < 1:
            if 0 not in dict(gen_graph.in_degree()).values():
                in_node = random.choice(nodes)
            # otherwise prefer nodes that do not receive any edge yet
            else:
                # isolates() is an iterator in networkx >= 2.0, so materialize it
                isolated = list(nx.isolates(gen_graph))
                if len(isolated) > 0:
                    in_node = random.choice(isolated)
                else:
                    # sample until a node with in-degree 0 is found
                    in_node_degree = 1
                    while in_node_degree > 0:
                        in_node = random.choice(list(gen_graph.nodes()))
                        in_node_degree = gen_graph.in_degree(in_node)
        # choose the in node by preferential attachment on in-degree
        else:
            in_degrees = dict(gen_graph.in_degree())
            in_nodes = list(in_degrees.keys())
            in_degree = np.array(list(in_degrees.values()), dtype=float)
            in_node = np.random.choice(in_nodes, p=in_degree / i)

        if out_node != in_node and (out_node, in_node) not in gen_graph.edges():
            gen_graph.add_edge(out_node, in_node)
            i += 1

    return gen_graph
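A hypothetical call; the parameter values below are purely illustrative. The function keeps adding edges until num_edges distinct non-self-loop edges exist, so the returned DiGraph has exactly that many edges:

import random
import numpy as np
import networkx as nx

g = sdg(num_nodes=50, num_edges=120, epsilon1=0.1, epsilon2=0.1)
print(g.number_of_nodes(), g.number_of_edges())   # 50 120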
Project: robograph    Author: csparpa
def has_isles(self):
        """
        Tells whether the graph has isolated nodes, i.e. at least one node
        that is disconnected from the bigger graph component.
        :return: bool
        """
        # isolates() is an iterator in networkx >= 2.0, so materialize it before len()
        return len(list(nx.isolates(self._nxgraph))) != 0
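Since isolates() yields nodes lazily in networkx >= 2.0, the same check can also be written without building a list; a minimal standalone sketch (the function name is illustrative, not part of robograph):

import networkx as nx

def has_isolated_nodes(graph):
    # any() stops at the first isolated node instead of materializing the whole list
    return any(True for _ in nx.isolates(graph))

G = nx.Graph()
G.add_edge(0, 1)
G.add_node(2)                     # node 2 is isolated
print(has_isolated_nodes(G))      # True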
Project: sidewalkify    Author: AccessMap
def process_acyclic(G):
    paths = []
    while True:
        # Handle 'endpoint' starts - certainly acyclic
        n = len(paths)
        # We want to start from a dangling node, i.e. a node where an edge
        # terminates. Features of that dangling node:
        #   1) its in-degree is 0 or 1
        #   2) its out-degree is 1
        #   3) if its in-degree is 1, the node's single predecessor is also
        #      its single successor

        # Identify candidates first - don't want to create/remove candidates
        # by updating at the same time
        candidates = []
        for node in G.nodes():
            predecessors = list(G.predecessors(node))
            successors = list(G.successors(node))
            in_degree = len(predecessors)
            out_degree = len(successors)

            if out_degree == 1:
                if in_degree == 0:
                    candidates.append(node)
                elif in_degree == 1:
                    if successors == predecessors:
                        candidates.append(node)

        # Create paths for every candidate
        for candidate in candidates:
            # If this point is reached, it's a dangle!
            paths.append(find_path(G, candidate, list(G[candidate])[0]))

        G.remove_nodes_from(list(nx.isolates(G)))
        if n == len(paths):
            # No change since last pass = exhausted attempts
            break
    return paths
Project: sidewalkify    Author: AccessMap
def process_cyclic(G):
    paths = []
    while True:
        # Pick the next edge (or random - there's no strategy here)
        try:
            edge = next(iter(G.edges()))
        except StopIteration:
            break
        # Start traveling
        paths.append(find_path(G, edge[0], edge[1], cyclic=True))
        # G.remove_nodes_from(nx.isolates(G))
    return paths
Project: pybel-tools    Author: pybel
def remove_isolated_nodes(graph):
    """Removes isolated nodes from the network

    :param pybel.BELGraph graph: A BEL graph
    """
    nodes = list(nx.isolates(graph))
    graph.remove_nodes_from(nodes)
Project: prov2bigchaindb    Author: DLR-SC
def calculate_account_data(prov_document: provmodel.ProvDocument) -> list:
        """
        Transforms a ProvDocument into a list of tuples, each containing:
        a ProvAgent, the list of ProvRelations from that agent,
        the list of ProvElements associated with the ProvAgent,
        and the list of Namespaces.

        :param prov_document: Document to transform
        :type prov_document: provmodel.ProvDocument
        :return: List of tuples(ProvAgent, list(), list(), list())
        :rtype: list
        """

        namespaces = prov_document.get_registered_namespaces()
        g = provgraph.prov_to_graph(prov_document=prov_document)
        sorted_nodes = topological_sort(g, reverse=True)
        agents = list(filter(lambda elem: isinstance(elem, provmodel.ProvAgent), sorted_nodes))
        elements = list(filter(lambda elem: not isinstance(elem, provmodel.ProvAgent), sorted_nodes))

        # Check on compatibility
        if not is_directed_acyclic_graph(g):
            raise Exception("Provenance graph is not acyclic")
        if list(isolates(g)):  # isolates() is an iterator in networkx >= 2.0
            raise Exception("Provenance not compatible with role-based concept. Has isolated Elements")
        for element in elements:
            if provmodel.ProvAgent not in [type(n) for n in g.neighbors(element)]:
                raise Exception(
                    "Provenance not compatible with role-based concept. Element {} has not relation to any agent".format(
                        element))

        accounts = []
        for agent in agents:
            # find out-going relations from agent
            agent_relations = []
            for u, v in g.out_edges(agent):
                # TODO: check that the filter does not leave out any info
                agent_relations.append(g.get_edge_data(u, v)[0]['relation'])

            agent_elements = {}
            i = 0
            for element in elements:
                element_relations = []
                if g.has_edge(element, agent):
                    for u, v in set(g.out_edges(element)):
                        for relation in g[u][v].values():
                            element_relations.append(relation['relation'])
                    agent_elements[i] = {element: element_relations}
                    i += 1

            accounts.append((agent, agent_relations, agent_elements, namespaces))
        return accounts
Project: KDDCUP2016    Author: hugochan
def search(self, query, exclude=[], limit=50, force=False, yc=2013):

        graph = build_graph(query,
                            self.params['K'],
                            self.params['H'],
                            self.params['min_topic_lift'],
                            self.params['min_ngram_lift'],
                            exclude, force, load=True)


        # Remove isolated nodes
        graph.remove_nodes_from(list(nx.isolates(graph)))

        # Simple method to check if node is a document node.
        is_pub = lambda n: (graph.node[n]["type"] == "paper")

        # Get year median to replace missing values.
        # npapers = 0
        years = []
        for u in graph.nodes():
            if is_pub(u):
                # npapers += 1
                if graph.node[u]["year"] > 0:
                    years.append(graph.node[u]["year"])

        year_median = np.median(years)

        yo = 1950
        wcits = defaultdict(float)
        for u in graph.nodes():
            if is_pub(u):
                weighted_citation = 0
                nc = 0 # citation count

                year = graph.node[u]["year"]
                if year == 0: # missing
                    year = year_median
                elif year < yo or year > yc: # truncate
                    year = max(min(year, yc), yo)
                age_decay = np.exp(-self.params["age_relev"]*(yc-year))

                query_sim = np.exp(-self.params["query_relev"]*(1.0-graph.node[u]["query_score"]))

                in_edges = graph.in_edges(u, data=True)
                for v, _, atts in in_edges:
                    if is_pub(v):
                        ctx_sim = np.exp(-self.params["ctx_relev"]*(1.0-atts["weight"]))
                        weighted_citation += ctx_sim*query_sim*age_decay
                        nc += 1

                if nc > 0:
                    weighted_citation = weighted_citation*nc**(-self.params["beta"])
                wcits[graph.node[u]["entity_id"]] = weighted_citation

        # Sort dict by value
        sorted_wcits = sorted(wcits.items(), key=lambda item: item[1], reverse=True)
        ids, _wcits = zip(*sorted_wcits)
        return ids[:limit]