Python networkx 模块,read_gml() 实例源码

我们从Python开源项目中,提取了以下15个代码示例,用于说明如何使用networkx.read_gml()。

项目:histonets-cv    作者:sul-cidr    | 项目源码 | 文件源码
def test_edges_to_graph_gml(self):
        """Round-trip an edges JSON fixture through GML and compare it
        against the reference GML graph by node set and edge set."""
        expected = nx.read_gml(fixtures_path('graph.gml'))
        with open(fixtures_path('graph.json'), 'r') as handle:
            serialized = utils.edges_to_graph(json.load(handle), fmt='gml')
        produced = nx.parse_gml(serialized)
        assert nodeset(produced) == nodeset(expected)
        assert edgeset(produced) == edgeset(expected)
项目:histonets-cv    作者:sul-cidr    | 项目源码 | 文件源码
def test_command_graph(self):
        """Invoke the CLI ``graph`` command on serialized match regions and
        check its GraphML output against the GML fixture graph."""
        matches = [
            ((0, 0), (3, 3)),
            ((1, 11), (4, 14)),
            ((8, 12), (11, 15)),
        ]
        result = self.runner.invoke(
            cli.graph,
            [utils.serialize_json(matches), self.image_grid]
        )
        produced = nx.parse_graphml(result.output.strip())
        expected = nx.read_gml(fixtures_path('graph.gml'))
        assert nodeset(produced) == nodeset(expected)
        assert edgeset(produced) == edgeset(expected)
项目:maple    作者:Zhengzi    | 项目源码 | 文件源码
def main():
    """Load two CFG dumps of dtls1_reassemble_fragment and extract a trace
    of length 3 starting from the listed basic-block addresses."""
    #extract_intra_function_cfg("C:\\Users\\Xu Zhengzi\\Desktop\\oh\\")
    cfg1 = nx.read_gml("C:\\Users\\Xu Zhengzi\\Desktop\\og\\dtls1_reassemble_fragment.gml")
    # NOTE(review): cfg2 is parsed but never used below — kept so the
    # patched-version GML is still validated by read_gml; confirm intent.
    cfg2 = nx.read_gml("C:\\Users\\Xu Zhengzi\\Desktop\\oh\\dtls1_reassemble_fragment.gml")
    nodes1 = ['0x80c0b14', '0x80c0b9a', '0x80c0c3c', '0x80c0c57', '0x80c0c5d', '0x80c0c8c', '0x80c0ccc', '0x80c0d0a', '0x80c0d2c', '0x80c0e83', '0x80c0fb4', '0x80c0eb6', '0x80c0f53', '0x80c0b97', '0x80c0d88', '0x80c0de1', '0x80c0db5', '0x80c0fac', '0x80c0f73', '0x80c0dd9']
    extract_trace(cfg1, 3, nodes1)

    # print() with a single argument behaves the same on Python 2 and 3;
    # the original `print "Finish"` statement is a SyntaxError on Python 3.
    print("Finish")
项目:maple    作者:Zhengzi    | 项目源码 | 文件源码
def main(): 
    """IDA batch entry point: wait for auto-analysis to finish, load two
    CFG dumps, and extract a trace from the original-version CFG."""
    #wait until IDA finishing loading the project
    idaapi.autoWait()
    #extract_intra_function_cfg("C:\\Users\\Xu Zhengzi\\Desktop\\oh\\")
    cfg1 = nx.read_gml("C:\\Users\\Xu Zhengzi\\Desktop\\og\\dtls1_reassemble_fragment.gml")
    # NOTE(review): cfg2 is parsed but never used below — kept so the
    # patched-version GML is still validated by read_gml; confirm intent.
    cfg2 = nx.read_gml("C:\\Users\\Xu Zhengzi\\Desktop\\oh\\dtls1_reassemble_fragment.gml")
    nodes1 = ['0x80c0b14', '0x80c0b9a', '0x80c0c3c', '0x80c0c57', '0x80c0c5d', '0x80c0c8c', '0x80c0ccc', '0x80c0d0a', '0x80c0d2c', '0x80c0e83', '0x80c0fb4', '0x80c0eb6', '0x80c0f53', '0x80c0b97', '0x80c0d88', '0x80c0de1', '0x80c0db5', '0x80c0fac', '0x80c0f73', '0x80c0dd9']
    extract_trace(cfg1, 3, nodes1)

    # print() with a single argument behaves the same on Python 2 and 3;
    # the original `print "Finish"` statement is a SyntaxError on Python 3.
    print("Finish")
项目:maple    作者:Zhengzi    | 项目源码 | 文件源码
def load_3(gml1, gml2, name):
    """Compare the quick hashes of two GML graphs.

    Reads both graph files, computes their quick hashes, and when the
    hash digests differ appends *name* (plus a newline) to
    result_openssl.txt.

    Returns:
        1 if the graphs' digests differ (name recorded), 0 otherwise
        (identical digests, or either hash could not be computed).
    """
    q1 = qucik_hash(nx.read_gml(gml1))
    q2 = qucik_hash(nx.read_gml(gml2))

    # a falsy result means the hash could not be produced for that graph
    if not q1:
        return 0
    if not q2:
        return 0

    # index [1] holds the comparable digest value; the original also
    # unpacked index [0] into unused locals s1/s2 — dropped here.
    if q1[1] == q2[1]:
        # identical graphs: nothing to report
        return 0

    with open("result_openssl.txt", "a") as report:
        report.write(name)
        report.write("\n")
    return 1
项目:maple    作者:Zhengzi    | 项目源码 | 文件源码
def merge_node(path,new_path):
    """Collapse linear chains of basic blocks in a GML control-flow graph.

    For every node with exactly one outgoing and one incoming edge whose
    sole successor also has exactly one incoming edge, the successor's
    "text" attribute is fused into the node (after ``remove_last_jump``
    strips the node's trailing jump) and the successor's out-edges are
    rewired onto the node. The merged graph is written to ``new_path``.

    NOTE(review): relies on networkx 1.x behavior — dict-returning
    ``out_degree()``/``in_degree()``, list-returning ``successors()``,
    and the ``g.node`` attribute dict; this breaks on networkx >= 2.

    Returns the number of nodes in the merged graph.
    """
    g = nx.read_gml(path)
    # candidates: every node with exactly one outgoing edge
    nodes = [n for n,d in g.out_degree().items() if d==1]
    for node in nodes:
        # the node may already have been removed by an earlier merge
        if not node in g.nodes():
            continue

        if g.in_degree(node) != 1:
            continue    
        # p is the one-element successor list (nx 1.x returns a list)
        p = g.successors(node)
        # only merge when the single successor has no other predecessors
        if g.in_degree(p)[p[0]] == 1:
            text1 = g.node[node]["text"]
            # drop the node's trailing jump before concatenation
            text1 = remove_last_jump(text1)
            text2 = g.node[p[0]]["text"]

            new_text = text1 + ',' + text2

            # inherit the successor's out-edges, then delete the successor
            nns = g.successors(p[0])
            g.node[node]["text"] = new_text

            for n in nns:
                g.add_edge(node, n)
            g.remove_node(p[0])
    nx.write_gml(g, new_path)
    return nx.number_of_nodes(g)
项目:observations    作者:edwardlib    | 项目源码 | 文件源码
def celegans(path):
  """Load the neural network of the worm C. Elegans [@watts1998collective].
  The neural network consists of around 300 neurons. Each connection
  between neurons is associated with a weight (positive integer)
  capturing the strength of the connection.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there. Filename is `celegansneural.gml`.

  Returns:
    Adjacency matrix as a np.ndarray `x_train` with 297 rows and 297
    columns.
  """
  import networkx as nx
  path = os.path.expanduser(path)
  filename = 'celegansneural.gml'
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://www-personal.umich.edu/~mejn/netdata/celegansneural.zip'
    maybe_download_and_extract(path, url)

  graph = nx.read_gml(os.path.join(path, filename))
  # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
  # int is the documented replacement and yields the same dtype.
  x_train = np.zeros([graph.number_of_nodes(), graph.number_of_nodes()],
                     dtype=int)
  # the GML parses as a multigraph; edge weight lives under key 0 -> 'value'
  for i, j in graph.edges():
    x_train[i, j] = int(graph[i][j][0]['value'])
  return x_train
项目:observations    作者:edwardlib    | 项目源码 | 文件源码
def karate(path):
  """Load Zachary's Karate Club [@zachary1977information].
  It is a social network of friendships between 34 members of a karate
  club at a US university from 1970 to 1972. During the study a
  conflict between instructor 'Mr. Hi' and administrator 'Officer' led
  the club to split into two. Half of the members formed a new club
  around 'Mr.  Hi'; other members found a new instructor or quit karate.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there. Filename is `karate.gml`.

  Returns:
    Tuple of adjacency matrix as a np.ndarray `x_train` with 34 rows
    and 34 columns and np.ndarray `y_train` of class memberships (0 for
    'Mr.Hi' and 1 for 'Officer').
  """
  import networkx as nx
  path = os.path.expanduser(path)
  filename = 'karate.gml'
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://www-personal.umich.edu/~mejn/netdata/karate.zip'
    maybe_download_and_extract(path, url)

  x_train = nx.read_gml(os.path.join(path, filename))
  x_train = nx.to_numpy_matrix(x_train).astype(int)
  # indices of the members who sided with 'Mr. Hi' (class 0)
  labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21]
  # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
  # int is the documented replacement and yields the same dtype.
  y_train = np.array([0 if i in labels else 1
                      for i in range(x_train.shape[0])], dtype=int)
  return x_train, y_train
项目:PBSuite    作者:dbrowneup    | 项目源码 | 文件源码
def __loadGMLFiles__(self):
        """Merge every support/*.gml graph into the whole gapGraph.

        Iterates through all GML files inside the support directory,
        merges their extend/evidence annotations into ``self.gapGraph``,
        and builds ``self.readSupport``, a dict of
        ``{"readName": [(node-or-edge info, trimInfo), ...]}``.
        """
        self.readSupport = defaultdict(list)
        for i in glob.glob(os.path.join(self.protocol.outDir,"support","*.gml")):
            # networkx changed read_gml's signature across releases, so
            # pick the call form matching the installed major.minor.
            # NOTE(review): float parsing is fragile — version "1.10"
            # collapses to 1.1 and would take the wrong branch; verify.
            truncNxVer = float('.'.join(networkx.__version__.split('.')[:2]))
            if truncNxVer >= 1.7:
                try:
                    inputGml = networkx.read_gml(i, relabel=True)
                except ValueError:
                    # an empty GML file fails to parse; skip it
                    logging.warning("GML file %s is empty" % i)
                    continue
            elif truncNxVer == 1.1:
                inputGml = networkx.read_gml(i)
            else:
                logging.warning("It is unknown if networkx version %s will work." % networkx.__version__)
                logging.warning("If you get an error here, please report it!!!")
                inputGml = networkx.read_gml(i)

            # node attribute 'extenders' is a ':'-separated list of read names
            for node in inputGml.nodes_iter():
                for readName in inputGml.node[node]['extenders'].split(':'):
                    if readName == '':
                        continue
                    trimInfo = self.__cleanReadName__(readName)
                    self.gapGraph.add_extend(node, trimInfo.name)
                    self.readSupport[trimInfo.name].append((node, trimInfo))

            # edge attribute 'evidence' is likewise a ':'-separated list
            for source, target, evidence in inputGml.edges_iter(data=True):
                for readName in evidence['evidence'].split(':'):
                    if readName == '':
                        continue
                    trimInfo = self.__cleanReadName__(readName)
                    self.gapGraph.add_evidence(source, target, trimInfo.name)
                    self.readSupport[trimInfo.name].append((source, target, trimInfo))
        # freeze to a plain dict so lookups of unknown reads raise KeyError
        # instead of silently inserting empty lists
        self.readSupport = dict(self.readSupport)
项目:PBSuite    作者:dbrowneup    | 项目源码 | 文件源码
def __loadGMLFiles__(self):
        """Merge every support/*.gml graph into the whole gapGraph.

        Iterates through all GML files inside the support directory,
        merges their extend/evidence annotations into ``self.gapGraph``,
        and builds ``self.readSupport``, a dict of
        ``{"readName": [(node-or-edge info, trimInfo), ...]}``.
        """
        self.readSupport = defaultdict(list)
        for i in glob.glob(os.path.join(self.protocol.outDir,"support","*.gml")):
            # networkx changed read_gml's signature across releases, so
            # pick the call form matching the installed major.minor.
            # NOTE(review): float parsing is fragile — version "1.10"
            # collapses to 1.1 and would take the wrong branch; verify.
            truncNxVer = float('.'.join(networkx.__version__.split('.')[:2]))
            if truncNxVer >= 1.7:
                try:
                    inputGml = networkx.read_gml(i, relabel=True)
                except ValueError:
                    # an empty GML file fails to parse; skip it
                    logging.warning("GML file %s is empty" % i)
                    continue
            elif truncNxVer == 1.1:
                inputGml = networkx.read_gml(i)
            else:
                logging.warning("It is unknown if networkx version %s will work." % networkx.__version__)
                logging.warning("If you get an error here, please report it!!!")
                inputGml = networkx.read_gml(i)

            # node attribute 'extenders' is a ':'-separated list of read names
            for node in inputGml.nodes_iter():
                for readName in inputGml.node[node]['extenders'].split(':'):
                    if readName == '':
                        continue
                    trimInfo = self.__cleanReadName__(readName)
                    self.gapGraph.add_extend(node, trimInfo.name)
                    self.readSupport[trimInfo.name].append((node, trimInfo))

            # edge attribute 'evidence' is likewise a ':'-separated list
            for source, target, evidence in inputGml.edges_iter(data=True):
                for readName in evidence['evidence'].split(':'):
                    if readName == '':
                        continue
                    trimInfo = self.__cleanReadName__(readName)
                    self.gapGraph.add_evidence(source, target, trimInfo.name)
                    self.readSupport[trimInfo.name].append((source, target, trimInfo))
        # freeze to a plain dict so lookups of unknown reads raise KeyError
        # instead of silently inserting empty lists
        self.readSupport = dict(self.readSupport)
项目:ez-segway    作者:thanh-nguyen-dang    | 项目源码 | 文件源码
def read_zoo_topology_gml(zoo_file):
        """Parse a Topology Zoo GML file (nodes keyed by their GML id),
        log every node with its attributes, and wrap the graph as a
        directed Topology."""
        graph = nx.read_gml(zoo_file, label="id")
        log.info(graph.nodes(data=True))
        for node_id, attrs in graph.nodes_iter(data=True):
            log.info("node {0}: {1}".format(str(node_id), str(attrs)))
        directed = graph.to_directed()
        return Topology(nx.Graph(directed))
项目:maple    作者:Zhengzi    | 项目源码 | 文件源码
def load(gml1, gml2, name):
    """Align the traces of two GML graphs via longest common subsequence
    and append the result to result.txt: *name*, then each matched pair
    of block addresses, then the unmatched addresses of the first trace
    after an "o" marker and of the second after a "p" marker.

    Always returns 0.
    """
    s1 = t(nx.read_gml(gml1))
    s2 = t(nx.read_gml(gml2))

    with open("result.txt", "a") as report:
        report.write(name)
        report.write("\n") 

        matched_left = []
        matched_right = []

        # each index pair maps a position in s1 to its LCS match in s2
        for pair in find_index(lcs(s1, s2)):
            left = s1[pair[0]][0]
            right = s2[pair[1]][0]
            report.write(hex(left) + " " + hex(right))
            report.write("\n")
            matched_left.append(left)
            matched_right.append(right)

        # "o" marker precedes the unmatched addresses of the first trace
        report.write("o")
        report.write("\n")
        for entry in s1:
            if entry[0] not in matched_left:
                report.write(hex(entry[0]))
                report.write("\n")

        # "p" marker precedes the unmatched addresses of the second trace
        report.write("p")
        report.write("\n")
        for entry in s2:
            if entry[0] not in matched_right:
                report.write(hex(entry[0]))
                report.write("\n")

    return 0
项目:k-clique-graphs-dense-subgraphs    作者:giannisnik    | 项目源码 | 文件源码
def read_gml(path, relabel=False):
    """Read a graph in GML format from *path*.

    Parameters
    ----------
    path : filehandle
       The (binary) filehandle to read from; each line is ASCII-decoded
       and HTML entities are unescaped before parsing.

    relabel : bool, optional
       If True use the GML node label attribute for node names otherwise
       use the node id.

    Returns
    -------
    G : MultiGraph or MultiDiGraph

    Raises
    ------
    ImportError
        If the pyparsing module is not available.

    See Also
    --------
    write_gml, parse_gml

    Notes
    -----
    Requires pyparsing: http://pyparsing.wikispaces.com/
    The GML specification says that files should be ASCII encoded, with
    any extended ASCII characters (iso8859-1) appearing as HTML character
    entities.

    References
    ----------
    GML specification:
    http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> nx.write_gml(G,'test.gml')
    >>> H=nx.read_gml('test.gml')
    """
    # decode lazily, line by line, and hand the stream to the parser
    decoded = (unescape(raw.decode('ascii')) for raw in path)
    return parse_gml(decoded, relabel=relabel)
项目:k-clique-graphs-dense-subgraphs    作者:giannisnik    | 项目源码 | 文件源码
def pyparse_gml():
    """A pyparsing tokenizer for GML graph format.

    Builds and returns the top-level ``graph`` grammar element; # comments
    are ignored. This is not intended to be called directly.

    See Also
    --------
    write_gml, read_gml, parse_gml
    """
    # pyparsing may be installed standalone or (in old matplotlib
    # releases) bundled under matplotlib.pyparsing; try both.
    try:
        from pyparsing import \
             Literal, CaselessLiteral, Word, Forward,\
             ZeroOrMore, Group, Dict, Optional, Combine,\
             ParseException, restOfLine, White, alphas, alphanums, nums,\
             OneOrMore,quotedString,removeQuotes,dblQuotedString, Regex
    except ImportError:
        try:
            from matplotlib.pyparsing import \
             Literal, CaselessLiteral, Word, Forward,\
             ZeroOrMore, Group, Dict, Optional, Combine,\
             ParseException, restOfLine, White, alphas, alphanums, nums,\
             OneOrMore,quotedString,removeQuotes,dblQuotedString, Regex
        except:
            raise ImportError('pyparsing not found',
                              'http://pyparsing.wikispaces.com/')

    # brackets delimit nested GML lists; suppressed from parse results
    lbrack = Literal("[").suppress()
    rbrack = Literal("]").suppress()
    pound = ("#")
    comment = pound + Optional( restOfLine )
    # scalar values: int, signed float (with optional exponent), bare
    # word, or double-quoted string (quotes stripped)
    integer = Word(nums+'-').setParseAction(lambda s,l,t:[ int(t[0])])
    real = Regex(r"[+-]?\d+\.\d*([eE][+-]?\d+)?").setParseAction(
        lambda s,l,t:[ float(t[0]) ])
    dblQuotedString.setParseAction( removeQuotes )
    key = Word(alphas,alphanums+'_')
    value_atom = (real | integer | Word(alphanums) | dblQuotedString)
    # value is recursive (a list of key/value pairs in brackets), so it
    # must be declared Forward and defined below with the << operator
    value = Forward()   # to be defined later with << operator
    keyvalue = Group(key+value)
    value << (value_atom | Group( lbrack + ZeroOrMore(keyvalue) + rbrack ))
    node = Group(Literal("node") + lbrack + Group(OneOrMore(keyvalue)) + rbrack)
    edge = Group(Literal("edge") + lbrack + Group(OneOrMore(keyvalue)) + rbrack)

    # optional file-header lines preceding the graph block
    creator = Group(Literal("Creator")+ Optional( restOfLine ))
    version = Group(Literal("Version")+ Optional( restOfLine ))
    graphkey = Literal("graph").suppress()

    graph = Dict (Optional(creator)+Optional(version)+\
        graphkey + lbrack + ZeroOrMore( (node|edge|keyvalue) ) + rbrack )
    graph.ignore(comment)

    return graph
项目:k-clique-graphs-dense-subgraphs    作者:giannisnik    | 项目源码 | 文件源码
def write_gml(G, path):
    """
    Write the graph G in GML format to the file handle *path*.

    Parameters
    ----------
    path : filehandle
       The (binary) filehandle to write to; each generated line is
       ASCII-encoded with non-ASCII characters emitted as HTML
       character references.

    See Also
    --------
    read_gml, parse_gml

    Notes
    -----
    GML specifications indicate that the file should only use
    7bit ASCII text encoding.iso8859-1 (latin-1).

    This implementation does not support all Python data types as GML
    data.  Nodes, node attributes, edge attributes, and graph
    attributes must be either dictionaries or single stings or
    numbers.  If they are not an attempt is made to represent them as
    strings.  For example, a list as edge data
    G[1][2]['somedata']=[1,2,3], will be represented in the GML file
    as::

       edge [
         source 1
         target 2
         somedata "[1, 2, 3]"
       ]


    Examples
    ---------
    >>> G=nx.path_graph(4)
    >>> nx.write_gml(G,"test.gml")

    Filenames ending in .gz or .bz2 will be compressed.

    >>> nx.write_gml(G,"test.gml.gz")
    """
    # newline-terminate each generated line and write it ASCII-encoded,
    # escaping anything outside ASCII as an HTML character reference
    for chunk in generate_gml(G):
        path.write((chunk + '\n').encode('ascii', 'xmlcharrefreplace'))


# fixture for nose tests