Python rdflib module: Namespace() example source code

We have extracted the following 46 code examples from open-source Python projects to illustrate how to use rdflib.Namespace().
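
For reference, here is a minimal, self-contained sketch of the basic rdflib.Namespace pattern that the excerpts below rely on (the http://example.org/ns# URI and the ex prefix are illustrative, not from any of the projects):

from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF

# A Namespace is a str subclass; attribute and item access both build URIRefs.
EX = Namespace("http://example.org/ns#")
assert EX.Person == URIRef("http://example.org/ns#Person")
assert EX["full-name"] == URIRef("http://example.org/ns#full-name")

g = Graph()
g.bind("ex", EX)  # bind a prefix so serializations emit ex: instead of the full URI
g.add((EX.alice, RDF.type, EX.Person))
g.add((EX.alice, EX["full-name"], Literal("Alice")))
print(g.serialize(format="turtle"))  # rdflib >= 6 returns str; older versions return bytes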

Project: pyfc4 | Author: ghukill
def _build_rdf(self, data=None):

        '''
        Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph

        Args:
            data (): payload from GET request, expected RDF content in various serialization formats

        Returns:
            None
        '''

        # recreate rdf data
        self.rdf = SimpleNamespace()
        self.rdf.data = data
        self.rdf.prefixes = SimpleNamespace()
        self.rdf.uris = SimpleNamespace()
        # populate prefixes
        for prefix,uri in self.repo.context.items():
            setattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri))
        # graph
        self._parse_graph()
Project: pyfc4 | Author: ghukill
def add_namespace(self, ns_prefix, ns_uri):

        '''
        preferred method is to instantiate with repository under 'context',
        but prefixes / namespaces can be added for a Resource instance

        adds to self.rdf.prefixes which will endure through create/update/refresh,
        and get added back to parsed graph namespaces

        Args:
            ns_prefix (str): prefix for namespace, e.g. 'dc', 'foaf'
            ns_uri (str): string of namespace / ontology. e.g. 'http://purl.org/dc/elements/1.1/', 'http://xmlns.com/foaf/0.1/'

        Returns:
            None: binds this new prefix:namespace combination to self.rdf.prefixes for use, and self.rdf.graph for serialization
        '''

        # add to prefixes
        setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))

        # bind to graph
        self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
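
A compact, standalone illustration of the same prefix-object pattern used above (this is not pyfc4 itself; the variable names and the foaf URI are only examples):

from types import SimpleNamespace
import rdflib

prefixes = SimpleNamespace()
setattr(prefixes, 'foaf', rdflib.Namespace('http://xmlns.com/foaf/0.1/'))
print(prefixes.foaf.name)  # -> http://xmlns.com/foaf/0.1/name

graph = rdflib.Graph()
graph.namespace_manager.bind('foaf', prefixes.foaf, override=False)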
Project: pycsvw | Author: bloomberg
def verify_rdf(rdf_output):
    ids_ns = Namespace("http://foo.example.org/CSV/People-IDs/")
    ages_ns = Namespace("http://foo.example.org/CSV/People-Ages/")
    g = ConjunctiveGraph()
    g.parse(data=rdf_output, format="turtle")

    all_subjects = {x for x in g.subjects()}
    assert len(all_subjects) == 2

    bob_subj = ids_ns['1']
    joe_subj = ids_ns['2']
    assert bob_subj in all_subjects
    assert joe_subj in all_subjects

    # Bob's details
    assert len(list(g.triples((bob_subj, ids_ns.id, Literal(1))))) == 1
    assert len(list(g.triples((bob_subj, ids_ns.name, Literal("Bob"))))) == 1
    assert len(list(g.triples((bob_subj, ages_ns.age, Literal(34))))) == 1

    # Joe's details
    assert len(list(g.triples((joe_subj, ids_ns.id, Literal(2))))) == 1
    assert len(list(g.triples((joe_subj, ids_ns.name, Literal("Joe"))))) == 1
    assert len(list(g.triples((joe_subj, ages_ns.age, Literal(54))))) == 1
Project: bibcat | Author: KnowledgeLinks
def populate_entity(self, bf_class, existing_uri=None):
        """Takes a BIBFRAME graph and MODS XML, extracts info for each
        entity's property and adds to graph.

        Args:
            bf_class(rdflib.URIRef): Namespace URI
        Returns:
           rdflib.URIRef: URI of new entity
        """
        if existing_uri:
            entity_uri = existing_uri
        else:
            # Check for custom IRIPattern
            entity_uri = self.__pattern_uri__(bf_class)
            # Finally generate an IRI from the default patterns
            if not entity_uri:
                entity_uri = self.__generate_uri__()
        self.graph.add((entity_uri, rdflib.RDF.type, bf_class))
        self.update_linked_classes(bf_class, entity_uri)
        self.update_direct_properties(bf_class, entity_uri)
        self.update_ordered_linked_classes(bf_class, entity_uri)
        self.add_admin_metadata(entity_uri)
        self.clean_rdf_types()
        return entity_uri
Project: QuitDiff | Author: AKSW
def serialize(self, add, delete):

        commit = Namespace("urn:commit:" + str(uuid.uuid1()) + ":")
        eccrev = Namespace("https://vocab.eccenca.com/revision/")

        g = ConjunctiveGraph()
        namespace_manager = NamespaceManager(g)
        namespace_manager.bind('eccrev', eccrev, override=False)

        g.add((commit.term(""), RDF.type, eccrev.Commit))

        graphUris = set(delete.keys()) | set(add.keys())

        for graphUri in graphUris:
            if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
                revision = Namespace("urn:revision:" + str(uuid.uuid1()) + ":")
                g.add((commit.term(""), eccrev.hasRevision, revision.term("")))
                g.add((revision.term(""), RDF.type, eccrev.Revision))
                if str(graphUri) != 'http://quitdiff.default/':
                    g.add((revision.term(""), eccrev.hasRevisionGraph, graphUri))
                if graphUri in delete.keys() and len(delete[graphUri]) > 0:
                    deleteGraphName = revision.term(":delete")
                    g.add((revision.term(""), eccrev.deltaDelete, deleteGraphName))
                    for triple in delete[graphUri]:
                        g.add(triple + (deleteGraphName,))
                if graphUri in add.keys() and len(add[graphUri]) > 0:
                    insertGraphName = revision.term(":insert")
                    g.add((revision.term(""), eccrev.deltaInsert, insertGraphName))
                    for triple in add[graphUri]:
                        g.add(triple + (insertGraphName,))

        return g.serialize(format="trig").decode("utf-8")
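
The commit and revision identifiers above lean on Namespace.term(''), which returns the bare namespace URI as a URIRef node; a tiny runnable illustration (the urn:commit prefix mirrors the snippet, the UUID is random):

import uuid
from rdflib import Namespace, URIRef

commit = Namespace("urn:commit:" + str(uuid.uuid1()) + ":")
assert commit.term("") == URIRef(str(commit))          # the namespace itself as a node
assert commit.term("x") == URIRef(str(commit) + "x")   # term() appends to the namespace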
Project: QuitDiff | Author: AKSW
def serialize(self, add, delete):
        diff = Namespace("http://topbraid.org/diff#")

        g = ConjunctiveGraph()

        namespace_manager = NamespaceManager(g)
        namespace_manager.bind('diff', diff, override=False)
        namespace_manager.bind('owl', OWL, override=False)

        graphUris = set(delete.keys()) | set(add.keys())

        for graphUri in graphUris:
            if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
                changeset = Namespace("urn:diff:" + str(uuid.uuid1()))
                graphTerm = changeset.term("")
                if str(graphUri) != 'http://quitdiff.default/':
                    g.add((graphTerm, OWL.imports, graphUri, graphTerm))
                g.add((graphTerm, RDF.type, OWL.Ontology, graphTerm))
                g.add((graphTerm, OWL.imports, diff.term(""), graphTerm))
                if graphUri in delete.keys() and len(delete[graphUri]) > 0:
                    i = 0
                    for triple in delete[graphUri]:
                        deleteStatementName = BNode()
                        g.add((deleteStatementName, RDF.type, diff.DeletedTripleDiff, graphTerm))
                        g.add((deleteStatementName, RDF.subject, triple[0], graphTerm))
                        g.add((deleteStatementName, RDF.predicate, triple[1], graphTerm))
                        g.add((deleteStatementName, RDF.object, triple[2], graphTerm))
                        i += 1
                if graphUri in add.keys() and len(add[graphUri]) > 0:
                    i = 0
                    for triple in add[graphUri]:
                        insertGraphName = BNode()
                        g.add((insertGraphName, RDF.type, diff.AddedTripleDiff, graphTerm))
                        g.add((insertGraphName, RDF.subject, triple[0], graphTerm))
                        g.add((insertGraphName, RDF.predicate, triple[1], graphTerm))
                        g.add((insertGraphName, RDF.object, triple[2], graphTerm))
                        i += 1

        return g.serialize(format="trig").decode("utf-8")
Project: QuitDiff | Author: AKSW
def serialize(self, add, delete):

        changeset = Namespace("http://purl.org/vocab/changeset/schema#")

        g = ConjunctiveGraph()

        namespace_manager = NamespaceManager(g)
        namespace_manager.bind('changeset', changeset, override=False)

        graphUris = set(delete.keys()) | set(add.keys())

        for graphUri in graphUris:
            if (graphUri in delete.keys() and len(delete[graphUri]) > 0) or (graphUri in add.keys() and len(add[graphUri]) > 0):
                diff = Namespace("urn:changeset:" + str(uuid.uuid1()))
                graphTerm = diff.term("")
                g.add((graphTerm, RDF.type, changeset.ChangeSet))
                if str(graphUri) != 'http://quitdiff.default/':
                    g.add((graphTerm, changeset.subjectOfChange, graphUri))
                if graphUri in delete.keys() and len(delete[graphUri]) > 0:
                    i = 0
                    for triple in delete[graphUri]:
                        deleteStatementName = BNode()
                        g.add((graphTerm, changeset.removal, deleteStatementName))
                        g.add((deleteStatementName, RDF.type, RDF.Statement))
                        g.add((deleteStatementName, RDF.subject, triple[0]))
                        g.add((deleteStatementName, RDF.predicate, triple[1]))
                        g.add((deleteStatementName, RDF.object, triple[2]))
                        i += 1
                if graphUri in add.keys() and len(add[graphUri]) > 0:
                    i = 0
                    for triple in add[graphUri]:
                        insertGraphName = BNode()
                        g.add((graphTerm, changeset.addition, insertGraphName))
                        g.add((insertGraphName, RDF.type, RDF.Statement))
                        g.add((insertGraphName, RDF.subject, triple[0]))
                        g.add((insertGraphName, RDF.predicate, triple[1]))
                        g.add((insertGraphName, RDF.object, triple[2]))
                        i += 1

        return g.serialize(format="turtle").decode("utf-8")
Project: Meiji | Author: GiovanniBalestrieri
def rdf_from_sources(self, names, outputFormat = "pretty-xml", rdfOutput = False) :
        """
        Extract the RDF graphs from a list of RDFa sources and serialize them into one graph. The sources are parsed, the RDF
        extracted, and serialization is done in the specified format.
        @param names: list of sources, each can be a URI, a file name, or a file-like object
        @keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml" and "pretty-xml", as well as "turtle" and "n3" are synonyms.
        @return: a serialized RDF Graph
        @rtype: string
        """
        try :
            from pyRdfaExtras import MyGraph
            graph = MyGraph()
        except :
            graph = Graph()

        for prefix in _bindings :
            graph.bind(prefix,Namespace(_bindings[prefix]))

        # the value of rdfOutput determines the reaction on exceptions...
        for name in names :
            self.graph_from_source(name, graph, rdfOutput)
        return graph.serialize(format=outputFormat)
Project: Meiji | Author: GiovanniBalestrieri
def test_concurrent2(): 
    dns = Namespace(u"http://www.example.com/")

    store = plugin.get("IOMemory", Store)()
    g1 = Graph(store=store)
    g2 = Graph(store=store)

    g1.add((dns.Name, dns.prop, Literal(u"test")))
    g1.add((dns.Name, dns.prop, Literal(u"test2")))
    g1.add((dns.Name, dns.prop, Literal(u"test3")))

    n = len(g1)
    i = 0

    for t in g1.triples((None, None, None)):
        i+=1
        g2.add(t)
        # next line causes problems because it adds a new Subject that needs
        # to be indexed  in __subjectIndex dictionary in IOMemory Store.
        # which invalidates the iterator used to iterate over g1
        g2.add((dns.Name1, dns.prop1, Literal(u"test")))
        g2.add((dns.Name1, dns.prop, Literal(u"test")))
        g2.add((dns.Name, dns.prop, Literal(u"test4")))

    assert i == n
Project: Meiji | Author: GiovanniBalestrieri
def test_ns_localname_roundtrip():

    XNS = rdflib.Namespace('http://example.net/fs')

    g = rdflib.Graph()
    g.bind('xns', str(XNS))
    g.add((
        rdflib.URIRef('http://example.com/thingy'),
        XNS['lowecase.xxx-xxx_xxx'],  # <- not round trippable
        rdflib.Literal("Junk")))
    turtledump = g.serialize(format="turtle").decode('utf-8')
    xmldump = g.serialize().decode('utf-8')
    g1 = rdflib.Graph()

    g1.parse(data=xmldump)

    g1.parse(data=turtledump, format="turtle")
Project: prophet | Author: MKLab-ITI
def rdf_from_sources(self, names, outputFormat = "pretty-xml", rdfOutput = False) :
        """
        Extract the RDF graphs from a list of RDFa sources and serialize them into one graph. The sources are parsed, the RDF
        extracted, and serialization is done in the specified format.
        @param names: list of sources, each can be a URI, a file name, or a file-like object
        @keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml" and "pretty-xml", as well as "turtle" and "n3" are synonyms.
        @return: a serialized RDF Graph
        @rtype: string
        """
        try :
            from pyRdfaExtras import MyGraph
            graph = MyGraph()
        except :
            graph = Graph()

        for prefix in _bindings :
            graph.bind(prefix,Namespace(_bindings[prefix]))

        # the value of rdfOutput determines the reaction on exceptions...
        for name in names :
            self.graph_from_source(name, graph, rdfOutput)
        return graph.serialize(format=outputFormat)
Project: pyfc4 | Author: ghukill
def _parse_graph(self):

        '''
        use Content-Type from headers to determine parsing method

        Args:
            None

        Return:
            None: sets self.rdf by parsing data from GET request, or by setting a blank graph if the resource does not yet exist
        '''

        # if resource exists, parse self.rdf.data
        if self.exists:
            self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)

        # else, create empty graph
        else:
            self.rdf.graph = rdflib.Graph()

        # bind any additional namespaces from repo instance, but do not override
        self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)
        for ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():
            self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)

        # conversely, add namespaces from parsed graph to self.rdf.prefixes
        for ns_prefix, ns_uri in self.rdf.graph.namespaces():
            setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
            setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)

        # pin old graph to resource, create copy graph for modifications
        self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)

        # parse triples for object-like access
        self.parse_object_like_triples()
Project: QuitStore | Author: AKSW
def __init__(
        self,
        configmode=None,
        configfile='config.ttl',
        repository=None,
        targetdir=None,
        versioning=True
    ):
        """The init method.

        This method checks if the config file is given and reads the config file.
        If the config file is missing, it will be generated after analyzing the
        file structure.
        """
        logger = logging.getLogger('quit.conf.QuitConfiguration')
        logger.debug('Initializing configuration object.')
        self.configchanged = False
        self.sysconf = Graph()
        self.graphconf = None
        self.versioning = versioning
        self.origin = None
        self.graphs = {}
        self.files = {}

        self.quit = Namespace('http://quit.aksw.org/')
        self.nsMngrSysconf = NamespaceManager(self.sysconf)
        self.nsMngrSysconf.bind('', 'http://quit.aksw.org/', override=False)
        self.nsMngrGraphconf = NamespaceManager(self.sysconf)
        self.nsMngrGraphconf.bind('', 'http://quit.aksw.org/', override=False)

        try:
            self.__initstoreconfig(
                repository=repository,
                targetdir=targetdir,
                configfile=configfile,
                configmode=configmode
            )
        except InvalidConfigurationError as e:
            logger.error(e)
            raise e

        return
Project: nrp | Author: django-rea
def agent_relationship_inv_lod(request, agent_assoc_id):
    aa = AgentAssociation.objects.filter(id=agent_assoc_id)
    if not aa:
        return HttpResponse({}, content_type='application/json')
    else:
        agent_association = aa[0]

    from rdflib import Graph, Literal, BNode
    from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
    from rdflib.serializer import Serializer
    from rdflib import Namespace, URIRef

    path, instance_abbrv, context, store, vf_ns = get_lod_setup_items()

    ref = URIRef(instance_abbrv + ":agent-relationship-inv-lod/" + str(agent_association.id) + "/")
    inv_ref = URIRef(instance_abbrv + ":agent-relationship-lod/" + str(agent_association.id) + "/")
    ref_object = URIRef(instance_abbrv + ":agent-lod/" + str(agent_association.is_associate.id) + "/")
    ref_subject = URIRef(instance_abbrv + ":agent-lod/" + str(agent_association.has_associate.id) + "/")
    property_name = camelcase_lower(agent_association.association_type.inverse_label)
    ref_relationship = URIRef(instance_abbrv + ":agent-relationship-type-lod/" + property_name)
    store.add((ref, RDF.type, vf_ns["Relationship"]))
    store.add((ref, vf_ns["subject"], ref_subject)) 
    store.add((ref, vf_ns["object"], ref_object))
    store.add((ref, vf_ns["relationship"], ref_relationship))
    store.add((ref, OWL.inverseOf, inv_ref))

    ser = store.serialize(format='json-ld', context=context, indent=4)
    return HttpResponse(ser, content_type='application/json')         
    #return render_to_response("valueaccounting/agent_association.html", {
    #    "agent_association": agent_association,
    #}, context_instance=RequestContext(request))
Project: nrp | Author: django-rea
def agent_type_lod(request, agent_type_name):
    ats = AgentType.objects.all()
    agent_type = None

    #import pdb; pdb.set_trace()
    for at in ats:
        if camelcase(at.name) == agent_type_name:
            agent_type = at

    if not agent_type:
        return HttpResponse({}, content_type='application/json') 

    from rdflib import Graph, Literal, BNode
    from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
    from rdflib.serializer import Serializer
    from rdflib import Namespace, URIRef

    path, instance_abbrv, context, store, vf_ns = get_lod_setup_items()

    if agent_type.name != "Person" and agent_type.name != "Group" and agent_type.name != "Individual":
        class_name = camelcase(agent_type.name)
        ref = URIRef(instance_abbrv + ":agent-type-lod/" +class_name)
        store.add((ref, RDF.type, OWL.Class))
        store.add((ref, SKOS.prefLabel, Literal(class_name, lang="en")))
        if agent_type.party_type == "individual":
            store.add((ref, RDFS.subClassOf, vf_ns.Person))
        else: 
            store.add((ref, RDFS.subClassOf, vf_ns.Group))

    ser = store.serialize(format='json-ld', context=context, indent=4)
    return HttpResponse(ser, content_type='application/json')    
    #return render_to_response("valueaccounting/agent_type.html", {
    #    "agent_type": agent_type,
    #}, context_instance=RequestContext(request))
Project: nrp | Author: django-rea
def agent_relationship_type_lod(request, agent_assoc_type_name):
    #import pdb; pdb.set_trace()
    aats = AgentAssociationType.objects.all()
    agent_assoc_type = None
    for aat in aats:
        if camelcase_lower(aat.label) == agent_assoc_type_name:
            agent_assoc_type = aat
            inverse = False
        elif camelcase_lower(aat.inverse_label) == agent_assoc_type_name:
            agent_assoc_type = aat
            inverse = True

    if not agent_assoc_type:
        return HttpResponse({}, content_type='application/json') 

    from rdflib import Graph, Literal, BNode
    from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
    from rdflib.serializer import Serializer
    from rdflib import Namespace, URIRef

    path, instance_abbrv, context, store, vf_ns = get_lod_setup_items()

    if inverse:
        property_name = camelcase_lower(agent_assoc_type.inverse_label)
        inverse_property_name = camelcase_lower(agent_assoc_type.label)
        label = agent_assoc_type.inverse_label
    else:
        property_name = camelcase_lower(agent_assoc_type.label)
        inverse_property_name = camelcase_lower(agent_assoc_type.inverse_label)
        label = agent_assoc_type.label
    ref = URIRef(instance_abbrv + ":agent-relationship-type-lod/" + property_name)
    inv_ref = URIRef(instance_abbrv + ":agent-relationship-type-lod/" + inverse_property_name)
    store.add((ref, RDF.type, RDF.Property))
    store.add((ref, SKOS.prefLabel, Literal(label, lang="en")))
    store.add((ref, OWL.inverseOf, inv_ref))

    ser = store.serialize(format='json-ld', context=context, indent=4)
    return HttpResponse(ser, content_type='application/json')      
    #return render_to_response("valueaccounting/agent_assoc_type.html", {
    #    "agent_assoc_type": agent_assoc_type,
    #}, context_instance=RequestContext(request))
Project: smartcontainers | Author: crcresearch
def build(self):
        ds = self.graph
        self.context = {"ce":
                        "https://raw.githubusercontent.com/Vocamp/ComputationalActivity/master/pattern/ComputationalEnvironment.jsonld"}

        CE = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalEnvironment#")
        CA = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalActivity#")
        DOCKER = Namespace("http://w3id.org/daspos/docker#")
        info = cpuinfo.get_cpu_info()

# ISSUES: We want the architecture URIs to be created only once on
# build or initial commit. Otherwise, we want to re-read the URIs
#  from the original graph. There are imm

        ds.bind("ce", CE)
        ceuri = URIRef(str(uuid.uuid4()))
        ds.add((ceuri, RDF.type, CE.ComputationalEnvironment))

        osUri = URIRef(str(uuid.uuid4()))
        ds.add((ceuri, CE.hasOperatingSystem, osUri))
        ds.add((osUri, RDFS.label, Literal("linux")))

        processorUri = URIRef(str(uuid.uuid4()))
        ds.add((ceuri, CE.hasHardware, processorUri))

        archUri = URIRef(str(uuid.uuid4()))
        ds.add((processorUri, CE.hasArchitecture,  archUri))
        ds.add((archUri, RDFS.label, Literal("amd64")))
        ds.add((processorUri, CE.hasNumberOfCores,
                Literal("4", datatype=XSD.nonNegativeInteger)))

        # :hasArchitecture
        # :hasNumberOfCores
        # :hasOperatingSystem
        # :hasSize Memory or HD
        # :isAvailable
        # :VirtualMACAddress
Project: smartcontainers | Author: crcresearch
def build(self):
        self.context = {"prov": "http://www.w3.org/ns/prov#"}
        PROV = Namespace("http://www.w3.org/ns/prov#")
        chuckORIDchuck = URIRef("http://orcid.org/000-0003-4901-6059")
        self.graph.add((chuckORIDchuck, RDF.type, PROV.Person))
Project: smartcontainers | Author: crcresearch
def build(self):
        self.context = {"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#"}
        UUIDNS = Namespace("urn:uuid:")
        self.graph.bind("uuidns", UUIDNS)
        self.graph.add((UUIDNS[tstuuid], RDFS.label, Literal(
            "Docker: https://www.docker.com/")))
Project: bibcat | Author: KnowledgeLinks
def setUp(self):
        self.processor = processor.Processor(
            rml_rules=os.path.join(FIXURES_PATH,
                                   "rml-basic.ttl"))
        self.rr = rdflib.Namespace("http://www.w3.org/ns/r2rml#")
        self.test_map = SimpleNamespace()
        self.test_map.reference = None
Project: bibcat | Author: KnowledgeLinks
def __init__(self, rml_rules):
        self.rml = rdflib.Graph()
        if isinstance(rml_rules, list):
            for rule in rml_rules:
                # First check if rule exists on the filesystem
                if os.path.exists(rule):
                    with open(rule) as file_obj:
                        raw_rule = file_obj.read()
                else:
                    raw_rule = get_map(rule).decode()
                self.rml.parse(data=raw_rule,
                               format='turtle')
        elif isinstance(rml_rules, (rdflib.Graph, rdflib.ConjunctiveGraph)):
            self.rml = rml_rules
        elif os.path.exists(rml_rules):
            self.rml.parse(rml_rules, format='turtle')
        else:
            self.rml.parse(data=get_map(rml_rules).decode(), format='turtle')
        # Populate Namespaces Manager
        for prefix, namespace in self.rml.namespaces():
            setattr(NS_MGR, prefix, rdflib.Namespace(namespace))
        self.output, self.source, self.triplestore_url = None, None, None
        self.parents = set()
        self.constants = dict(version=__version__)
        self.triple_maps = dict()
        for row in self.rml.query(GET_TRIPLE_MAPS):
            triple_map_iri = row[0]
            map_key = str(triple_map_iri)
            self.triple_maps[map_key] = SimpleNamespace()
            self.triple_maps[map_key].logicalSource = \
                self.__logical_source__(triple_map_iri)
            self.triple_maps[map_key].subjectMap = \
                self.__subject_map__(triple_map_iri)
            self.triple_maps[map_key].predicateObjectMap = \
                self.__predicate_object_map__(triple_map_iri)
Project: perseids-manifold | Author: RDACollectionsWG
def test_marmotta_server(self):
        slug = ''.join(random.choice(string.ascii_letters) for _ in range(random.randint(3, 10)))
        self.assertIsInstance(self.marmotta.server, Namespace)
        self.assertIsInstance(self.marmotta.server[slug], URIRef)
        self.assertEqual(str(self.marmotta.server[slug]), self.host+"/"+slug)
Project: perseids-manifold | Author: RDACollectionsWG
def __init__(self):
        self.ns = Namespace("http://www.w3.org/ns/ldp#")
Project: perseids-manifold | Author: RDACollectionsWG
def __init__(self, srv):
        self.server = Namespace(srv) if srv.endswith("/") else Namespace(srv+"/")
        self.ldp = lambda slug=None: self.server.ldp if slug is None else self.server["ldp"+slug[:-1]] if slug.startswith("/") and slug.endswith("/") else self.server["ldp"+slug] if slug.startswith("/") else self.server["ldp/"+slug[:-1]] if slug.endswith("/") else self.server["ldp/"+slug]
        self.sparql = Struct(select=self.server["sparql/select"], update=self.server["sparql/update"])
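
The one-line self.ldp lambda above packs four slug cases into a single conditional expression; the sketch below is a roughly equivalent, more readable re-expression (the ldp_uri name and its parameters are illustrative, not part of perseids-manifold):

from rdflib import Namespace, URIRef

def ldp_uri(server, slug=None):
    # Join "ldp" and the slug with exactly one slash and drop a trailing slash.
    if slug is None:
        return server.ldp                 # e.g. http://localhost:8080/ldp
    if slug.endswith("/"):
        slug = slug[:-1]                  # drop one trailing slash
    if not slug.startswith("/"):
        slug = "/" + slug                 # ensure a single leading slash
    return server["ldp" + slug]

# ldp_uri(Namespace("http://localhost:8080/"), "collections/1/")
#   -> URIRef("http://localhost:8080/ldp/collections/1")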
Project: table-extractor | Author: dbpedia
def define_namespace(self):
        """
        Method used to set the standard namespaces (dbr stands for DBpedia resource, dbp for DBpedia property,
          dbo for DBpedia ontology)

        :return:
        """

        if self.chapter != 'en':
            self.dbr = rdflib.Namespace("http://" + self.chapter + ".dbpedia.org/resource/")
        else:
            self.dbr = rdflib.Namespace("http://dbpedia.org/resource/")

        self.dbo = rdflib.Namespace("http://dbpedia.org/ontology/")
        self.dbp = rdflib.Namespace("http://dbpedia.org/property/")
Project: Meiji | Author: GiovanniBalestrieri
def __init__(self, state, top_level) :
        """
        @param state: the state behind this term mapping
        @type state: L{state.ExecutionContext}
        @param top_level : whether this is the top node of the DOM tree (the only place where initial contexts are handled)
        @type top_level : boolean
        """
        self.state = state

        # This is to store the local terms
        self.terms  = {}
        # This is to store the local Namespaces (a.k.a. prefixes)
        self.ns     = {}
        # Default vocabulary
        self.vocabulary = None

        if state.rdfa_version < "1.1" or top_level == False :
            return

        from .initialcontext    import initial_context    as context_data
        from .host              import initial_contexts   as context_ids
        from .host              import default_vocabulary

        for id in context_ids[state.options.host_language] :
            # This gives the id of a initial context, valid for this media type:
            data = context_data[id]

            # Merge the context data with the overall definition
            if state.options.host_language in default_vocabulary :
                self.vocabulary = default_vocabulary[state.options.host_language]
            elif data.vocabulary != "" :
                self.vocabulary = data.vocabulary

            for key in data.terms :
                self.terms[key] = URIRef(data.terms[key])
            for key in data.ns :
                self.ns[key] = (Namespace(data.ns[key]),False)


##################################################################################################################
Project: Meiji | Author: GiovanniBalestrieri
def rdf_from_sources(self, names, outputFormat = "turtle", rdfOutput = False) :
        """
        Extract the RDF graphs from a list of RDFa sources and serialize them into one graph. The sources are parsed, the RDF
        extracted, and serialization is done in the specified format.
        @param names: list of sources, each can be a URI, a file name, or a file-like object
        @keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml", "pretty-xml", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
        @keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible handling it; otherwise a graph is returned with an error message included in the processor graph
        @type rdfOutput: boolean
        @return: a serialized RDF Graph
        @rtype: string
        """
        # This is better because it gives access to the various, non-standard serializations
        # If it does not work because the extras are not installed, fall back to the standard
        # rdflib distribution...
        try :
            from pyRdfaExtras import MyGraph
            graph = MyGraph()
        except :
            graph = Graph()

        # graph.bind("xsd", Namespace('http://www.w3.org/2001/XMLSchema#'))
        # the value of rdfOutput determines the reaction on exceptions...
        for name in names :
            self.graph_from_source(name, graph, rdfOutput)
        retval = graph.serialize(format=outputFormat)
        return retval
Project: Meiji | Author: GiovanniBalestrieri
def contexts(self, triple=None):
        """
        Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
        or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.

        Returns instances of this store with the SPARQL wrapper
        object updated via addNamedGraph(?NAME).

        This causes a named-graph-uri key / value  pair to be sent over
        the protocol.

        Please note that some SPARQL endpoints are not able to find empty named
        graphs.
        """
        self.resetQuery()

        if triple:
            nts = self.node_to_sparql
            s, p, o = triple
            params = (nts(s if s else Variable('s')),
                      nts(p if p else Variable('p')),
                      nts(o if o else Variable('o')))
            self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params)
        else:
            self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')

        doc = ElementTree.parse(SPARQLWrapper.query(self).response)

        return (
            rt.get(Variable("name"))
            for rt, vars in _traverse_sparql_result_dom(
                doc, as_dictionary=True, node_from_result=self.node_from_result)
        )

    # Namespace persistence interface implementation
Project: Meiji | Author: GiovanniBalestrieri
def test_collection_render(self):
        foo = Namespace('http://www.example.org/foo/ns/')
        ex = Namespace('http://www.example.org/example/foo/')
        rdf = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')

        # Works:  x a rdf:List, a foo:Other ;
        # Fails:  y a foo:Wrapper, foo:wraps x; x a rdf:List, a foo:Other ;

        target1 = ConjunctiveGraph()
        target1.parse(data=target1xml)
        target2 = ConjunctiveGraph()
        target2.parse(data=target2xml)

        g = ConjunctiveGraph()
        bits = [ex['a'], ex['b'], ex['c']]
        l = Collection(g, ex['thing'], bits)
        triple = (ex['thing'], rdf['type'], foo['Other'])
        g.add(triple)
        triple = (ex['thing'], foo['property'], Literal('Some Value'))
        g.add(triple)
        for b in bits:
            triple = (b, rdf['type'], foo['Item'])
            g.add(triple)
        self.assertEqual(g.isomorphic(target1), True)

        # g.add((ex['wrapper'], rdf['type'], foo['Wrapper']))
        # g.add((ex['wrapper'], foo['wraps'], ex['thing']))
        # # resn3 = g.serialize(format="n3")
        # # print(resn3)
        # resxml = g.serialize(format="pretty-xml")
        # # print(resxml)
        # self.assertEqual(g.isomorphic(target2), True)
Project: Meiji | Author: GiovanniBalestrieri
def test_03_get_value(self):
        # is the name of entity E10009 "Arco Publications"?
        # (in graph http://bibliographica.org/entity/E10009)
        # Looking for:
        # <http://bibliographica.org/entity/E10009>
        #       <http://xmlns.com/foaf/0.1/name>
        #       "Arco Publications"
        #       <http://bibliographica.org/entity/E10009>

        g = self._load_example()
        s = URIRef("http://bibliographica.org/entity/E10009")
        FOAF = Namespace("http://xmlns.com/foaf/0.1/")
        self.assertTrue(g.value(s, FOAF.name).eq("Arco Publications"))
Project: prophet | Author: MKLab-ITI
def __init__(self, state, top_level) :
        """
        @param state: the state behind this term mapping
        @type state: L{state.ExecutionContext}
        @param top_level : whether this is the top node of the DOM tree (the only place where initial contexts are handled)
        @type top_level : boolean
        """     
        self.state = state

        # This is to store the local terms
        self.terms  = {}
        # This is to store the local Namespaces (a.k.a. prefixes)
        self.ns     = {}
        # Default vocabulary
        self.vocabulary = None

        if state.rdfa_version < "1.1" or top_level == False :
            return

        from .initialcontext    import initial_context    as context_data
        from .host              import initial_contexts   as context_ids
        from .host              import default_vocabulary

        for id in context_ids[state.options.host_language] :
            # This gives the id of a initial context, valid for this media type:
            data = context_data[id]

            # Merge the context data with the overall definition
            if state.options.host_language in default_vocabulary :
                self.vocabulary = default_vocabulary[state.options.host_language]
            elif data.vocabulary != "" :
                self.vocabulary = data.vocabulary

            for key in data.terms :
                self.terms[key] = URIRef(data.terms[key])
            for key in data.ns :
                self.ns[key] = (Namespace(data.ns[key]),False)


##################################################################################################################
Project: prophet | Author: MKLab-ITI
def rdf_from_sources(self, names, outputFormat = "turtle", rdfOutput = False) :
        """
        Extract the RDF graphs from a list of RDFa sources and serialize them into one graph. The sources are parsed, the RDF
        extracted, and serialization is done in the specified format.
        @param names: list of sources, each can be a URI, a file name, or a file-like object
        @keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml", "pretty-xml", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
        @keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible handling it; otherwise a graph is returned with an error message included in the processor graph
        @type rdfOutput: boolean
        @return: a serialized RDF Graph
        @rtype: string
        """
        # This is better because it gives access to the various, non-standard serializations
        # If it does not work because the extras are not installed, fall back to the standard
        # rdflib distribution...
        try :
            from pyRdfaExtras import MyGraph
            graph = MyGraph()
        except :
            graph = Graph()

        # graph.bind("xsd", Namespace('http://www.w3.org/2001/XMLSchema#'))
        # the value of rdfOutput determines the reaction on exceptions...
        for name in names :
            self.graph_from_source(name, graph, rdfOutput)
        retval = graph.serialize(format=outputFormat)
        return retval
Project: prophet | Author: MKLab-ITI
def contexts(self, triple=None):
        """
        Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
        or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.

        Returns instances of this store with the SPARQL wrapper
        object updated via addNamedGraph(?NAME).

        This causes a named-graph-uri key / value  pair to be sent over
        the protocol.

        Please note that some SPARQL endpoints are not able to find empty named
        graphs.
        """
        self.resetQuery()

        if triple:
            s, p, o = triple
            params = ((s if s else Variable('s')).n3(),
                      (p if p else Variable('p')).n3(),
                      (o if o else Variable('o')).n3())
            self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params)
        else:
            self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')

        doc = ElementTree.parse(SPARQLWrapper.query(self).response)

        return (
            rt.get(Variable("name"))
            for rt, vars in _traverse_sparql_result_dom(
                doc, as_dictionary=True, node_from_result=self.node_from_result)
        )

    # Namespace persistence interface implementation
Project: odmtp-tpf | Author: benjimor
def _frament_fill_meta(self, tpq, fragment, last_result, total_nb_triples, nb_triple_per_page, request, tpf_url):
        meta_graph = self._tpf_uri(tpf_url, 'metadata')
        fragment.add_graph(meta_graph)
        dataset_base = self._tpf_uri(tpf_url)
        source = URIRef(request.build_absolute_uri())
        dataset_template = Literal('%s%s' % (dataset_base, '{?subject,predicate,object}'))
        data_graph = self._tpf_uri(tpf_url, 'dataset')
        tp_node = BNode('triplePattern')
        subject_node = BNode('subject')
        predicate_node = BNode('predicate')
        object_node = BNode('object')
        HYDRA = Namespace("http://www.w3.org/ns/hydra/core#")
        VOID = Namespace("http://rdfs.org/ns/void#")
        FOAF = Namespace("http://xmlns.com/foaf/0.1/")
        DCTERMS = Namespace("http://purl.org/dc/terms/")

        fragment.add_meta_quad(meta_graph, FOAF['primaryTopic'], dataset_base, meta_graph)
        fragment.add_meta_quad(data_graph, HYDRA['member'], data_graph, meta_graph)
        fragment.add_meta_quad(data_graph, RDF.type, VOID['Dataset'], meta_graph)
        fragment.add_meta_quad(data_graph, RDF.type, HYDRA['Collection'], meta_graph)
        fragment.add_meta_quad(data_graph, VOID['subset'], source, meta_graph)
        fragment.add_meta_quad(data_graph, VOID['uriLookupEndpoint'], dataset_template, meta_graph)
        fragment.add_meta_quad(data_graph, HYDRA['search'], tp_node, meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['template'], dataset_template, meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['variableRepresentation'], HYDRA['ExplicitRepresentation'], meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['mapping'], subject_node, meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['mapping'], predicate_node, meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['mapping'], object_node, meta_graph)
        fragment.add_meta_quad(subject_node, HYDRA['variable'], Literal("subject"), meta_graph)
        fragment.add_meta_quad(subject_node, HYDRA['property'], RDF.subject, meta_graph)
        fragment.add_meta_quad(predicate_node, HYDRA['variable'], Literal("predicate"), meta_graph)
        fragment.add_meta_quad(predicate_node, HYDRA['property'], RDF.predicate, meta_graph)
        fragment.add_meta_quad(object_node, HYDRA['variable'], Literal("object"), meta_graph)
        fragment.add_meta_quad(object_node, HYDRA['property'], RDF.object, meta_graph)

        fragment.add_meta_quad(dataset_base, VOID['subset'], source, meta_graph)
        fragment.add_meta_quad(source, RDF.type, HYDRA['PartialCollectionView'], meta_graph)
        fragment.add_meta_quad(source, DCTERMS['title'], Literal("TPF Twitter search API 1.1"), meta_graph)
        fragment.add_meta_quad(source, DCTERMS['description'], Literal("Triple Pattern from the twitter api matching the pattern {?s=%s, ?p=%s, ?o=%s}" % (tpq.subject, tpq.predicate, tpq.obj)), meta_graph)
        fragment.add_meta_quad(source, DCTERMS['source'], data_graph, meta_graph)
        fragment.add_meta_quad(source, HYDRA['totalItems'], Literal(total_nb_triples, datatype=XSD.int), meta_graph)
        fragment.add_meta_quad(source, VOID['triples'], Literal(total_nb_triples, datatype=XSD.int), meta_graph)
        fragment.add_meta_quad(source, HYDRA['itemsPerPage'], Literal(nb_triple_per_page, datatype=XSD.int), meta_graph)
        fragment.add_meta_quad(source, HYDRA['first'], self._tpf_url(dataset_base, 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
        if tpq.page > 1:
            fragment.add_meta_quad(source, HYDRA['previous'], self._tpf_url(dataset_base, tpq.page - 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
        if not last_result:
            fragment.add_meta_quad(source, HYDRA['next'], self._tpf_url(dataset_base, tpq.page + 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
        fragment.add_prefix('twittertpf', Namespace("%s#" % tpf_url[:-1]))
        fragment.add_prefix('void', VOID)
        fragment.add_prefix('foaf', FOAF)
        fragment.add_prefix('hydra', HYDRA)
        fragment.add_prefix('purl', Namespace('http://purl.org/dc/terms/'))
Project: odmtp-tpf | Author: benjimor
def _frament_fill_meta(self, tpq, fragment, last_result, total_nb_triples, nb_triple_per_page, request, tpf_url):
        meta_graph = self._tpf_uri(tpf_url, 'metadata')
        fragment.add_graph(meta_graph)
        dataset_base = self._tpf_uri(tpf_url)
        source = URIRef(request.build_absolute_uri())
        dataset_template = Literal('%s%s' % (dataset_base, '{?subject,predicate,object}'))
        data_graph = self._tpf_uri(tpf_url, 'dataset')
        tp_node = BNode('triplePattern')
        subject_node = BNode('subject')
        predicate_node = BNode('predicate')
        object_node = BNode('object')
        HYDRA = Namespace("http://www.w3.org/ns/hydra/core#")
        VOID = Namespace("http://rdfs.org/ns/void#")
        FOAF = Namespace("http://xmlns.com/foaf/0.1/")
        DCTERMS = Namespace("http://purl.org/dc/terms/")

        fragment.add_meta_quad(meta_graph, FOAF['primaryTopic'], dataset_base, meta_graph)
        fragment.add_meta_quad(data_graph, HYDRA['member'], data_graph, meta_graph)
        fragment.add_meta_quad(data_graph, RDF.type, VOID['Dataset'], meta_graph)
        fragment.add_meta_quad(data_graph, RDF.type, HYDRA['Collection'], meta_graph)
        fragment.add_meta_quad(data_graph, VOID['subset'], source, meta_graph)
        fragment.add_meta_quad(data_graph, VOID['uriLookupEndpoint'], dataset_template, meta_graph)
        fragment.add_meta_quad(data_graph, HYDRA['search'], tp_node, meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['template'], dataset_template, meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['variableRepresentation'], HYDRA['ExplicitRepresentation'], meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['mapping'], subject_node, meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['mapping'], predicate_node, meta_graph)
        fragment.add_meta_quad(tp_node, HYDRA['mapping'], object_node, meta_graph)
        fragment.add_meta_quad(subject_node, HYDRA['variable'], Literal("subject"), meta_graph)
        fragment.add_meta_quad(subject_node, HYDRA['property'], RDF.subject, meta_graph)
        fragment.add_meta_quad(predicate_node, HYDRA['variable'], Literal("predicate"), meta_graph)
        fragment.add_meta_quad(predicate_node, HYDRA['property'], RDF.predicate, meta_graph)
        fragment.add_meta_quad(object_node, HYDRA['variable'], Literal("object"), meta_graph)
        fragment.add_meta_quad(object_node, HYDRA['property'], RDF.object, meta_graph)

        fragment.add_meta_quad(dataset_base, VOID['subset'], source, meta_graph)
        fragment.add_meta_quad(source, RDF.type, HYDRA['PartialCollectionView'], meta_graph)
        fragment.add_meta_quad(source, DCTERMS['title'], Literal("TPF Github search API v3"), meta_graph)
        fragment.add_meta_quad(source, DCTERMS['description'], Literal("Triple Pattern from the github repo api v3 matching the pattern {?s=%s, ?p=%s, ?o=%s}" % (tpq.subject, tpq.predicate, tpq.obj)), meta_graph)
        fragment.add_meta_quad(source, DCTERMS['source'], data_graph, meta_graph)
        fragment.add_meta_quad(source, HYDRA['totalItems'], Literal(total_nb_triples, datatype=XSD.int), meta_graph)
        fragment.add_meta_quad(source, VOID['triples'], Literal(total_nb_triples, datatype=XSD.int), meta_graph)
        fragment.add_meta_quad(source, HYDRA['itemsPerPage'], Literal(nb_triple_per_page, datatype=XSD.int), meta_graph)
        fragment.add_meta_quad(source, HYDRA['first'], self._tpf_url(dataset_base, 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
        if tpq.page > 1:
            fragment.add_meta_quad(source, HYDRA['previous'], self._tpf_url(dataset_base, tpq.page - 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
        if not last_result:
            fragment.add_meta_quad(source, HYDRA['next'], self._tpf_url(dataset_base, tpq.page + 1, tpq.subject, tpq.predicate, tpq.obj), meta_graph)
        fragment.add_prefix('twittertpf', Namespace("%s#" % tpf_url[:-1]))
        fragment.add_prefix('void', VOID)
        fragment.add_prefix('foaf', FOAF)
        fragment.add_prefix('hydra', HYDRA)
        fragment.add_prefix('purl', Namespace('http://purl.org/dc/terms/'))
Project: nrp | Author: django-rea
def get_lod_setup_items():

    path = get_url_starter() + "/api/"
    instance_abbrv = Site.objects.get_current().domain.split(".")[0]

    context = {
        "vf": "https://w3id.org/valueflows/",
        "owl": "http://www.w3.org/2002/07/owl#",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "skos": "http://www.w3.org/2004/02/skos/core#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        #"rdfs:label": { "@container": "@language" },
        "Agent": "vf:Agent",
        "Person": "vf:Person",
        "Group": "vf:Group",
        #"Organization": "vf:Organization",
        "url":  { "@id": "vf:url", "@type": "@id" },
        "image": { "@id": "vf:image", "@type": "@id" },
        #"displayName": "vf:displayName",
        #"displayNameMap": { "@id": "displayName", "@container": "@language" },
        "Relationship": "vf:Relationship",
        "subject": { "@id": "vf:subject", "@type": "@id" },
        "object": { "@id": "vf:object", "@type": "@id" },
        "relationship": { "@id": "vf:relationship", "@type": "@id" },
        #"member": { "@id": "vf:member", "@type": "@id" }
        "label": "skos:prefLabel",
        "labelMap": { "@id": "skos:prefLabel", "@container": "@language" },
        "note": "skos:note",
        "noteMap": { "@id": "skos:note", "@container": "@language" },
        "inverseOf": "owl:inverseOf",
        instance_abbrv: path,
    }

    store = Graph()
    #store.bind("foaf", FOAF)
    store.bind("rdf", RDF)
    store.bind("rdfs", RDFS)
    store.bind("owl", OWL)
    store.bind("skos", SKOS)
    #as_ns = Namespace("http://www.w3.org/ns/activitystreams#")
    #store.bind("as", as_ns)
    #schema_ns = Namespace("http://schema.org/")
    #store.bind("schema", schema_ns)
    #at_ns = Namespace(path + "agent-type/")
    #store.bind("at", at_ns)
    #aat_ns = Namespace(path + "agent-relationship-type/")
    #store.bind("aat", aat_ns)
    vf_ns = Namespace("https://w3id.org/valueflows/")
    store.bind("vf", vf_ns)
    instance_ns = Namespace(path)
    store.bind("instance", instance_ns)

    return path, instance_abbrv, context, store, vf_ns
Project: nrp | Author: django-rea
def get_lod_setup_items():
    from rdflib import Graph
    from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
    from rdflib import Namespace

    path = get_url_starter() + "/accounting/"
    instance_abbrv = Site.objects.get_current().domain.split(".")[0]

    context = {
        "vf": "https://w3id.org/valueflows/",
        "owl": "http://www.w3.org/2002/07/owl#",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "skos": "http://www.w3.org/2004/02/skos/core#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        #"rdfs:label": { "@container": "@language" },
        "Agent": "vf:Agent",
        "Person": "vf:Person",
        "Group": "vf:Group",
        #"Organization": "vf:Organization",
        "url":  { "@id": "vf:url", "@type": "@id" },
        "image": { "@id": "vf:image", "@type": "@id" },
        #"displayName": "vf:displayName",
        #"displayNameMap": { "@id": "displayName", "@container": "@language" },
        "Relationship": "vf:Relationship",
        "subject": { "@id": "vf:subject", "@type": "@id" },
        "object": { "@id": "vf:object", "@type": "@id" },
        "relationship": { "@id": "vf:relationship", "@type": "@id" },
        #"member": { "@id": "vf:member", "@type": "@id" }
        "label": "skos:prefLabel",
        "labelMap": { "@id": "skos:prefLabel", "@container": "@language" },
        "note": "skos:note",
        "noteMap": { "@id": "skos:note", "@container": "@language" },
        "inverseOf": "owl:inverseOf",
        instance_abbrv: path,
    }

    store = Graph()
    #store.bind("foaf", FOAF)
    store.bind("rdf", RDF)
    store.bind("rdfs", RDFS)
    store.bind("owl", OWL)
    store.bind("skos", SKOS)
    #as_ns = Namespace("http://www.w3.org/ns/activitystreams#")
    #store.bind("as", as_ns)
    #schema_ns = Namespace("http://schema.org/")
    #store.bind("schema", schema_ns)
    #at_ns = Namespace(path + "agent-type/")
    #store.bind("at", at_ns)
    #aat_ns = Namespace(path + "agent-relationship-type/")
    #store.bind("aat", aat_ns)
    vf_ns = Namespace("https://w3id.org/valueflows/")
    store.bind("vf", vf_ns)
    instance_ns = Namespace(path)
    store.bind("instance", instance_ns)

    return path, instance_abbrv, context, store, vf_ns
Project: nrp | Author: django-rea
def agent_lod(request, agent_id):
    agents = EconomicAgent.objects.filter(id=agent_id)
    if not agents:
        return HttpResponse({}, content_type='application/json')

    agent = agents[0]
    subject_assocs = agent.all_is_associates()
    object_assocs = agent.all_has_associates()

    from rdflib import Graph, Literal, BNode
    from rdflib.namespace import FOAF, RDF, RDFS, OWL, SKOS
    from rdflib.serializer import Serializer
    from rdflib import Namespace, URIRef

    path, instance_abbrv, context, store, vf_ns = get_lod_setup_items()

    #Lynn: I made a change here for consistency. Please check and fix if needed.
    ref = URIRef(instance_abbrv + ":agent-lod/" + str(agent.id) + "/")
    if agent.agent_type.name == "Individual" or agent.agent_type.name == "Person":
        store.add((ref, RDF.type, vf_ns.Person))
    #elif agent.agent_type.name == "Organization":
    #    store.add((ref, RDF.type, vf_ns.Organization))
    else:
        at_class_name = camelcase(agent.agent_type.name)
        ref_class = URIRef(instance_abbrv + ":agent-type-lod/" + at_class_name)
        store.add((ref, RDF.type, ref_class))
    store.add((ref, vf_ns["label"], Literal(agent.name, lang="en")))
    #if agent.photo_url:
    #    store.add((ref, vf_ns["image"], agent.photo_url))

    #if subject_assocs or object_assocs:
    #    store.add((  ))
    if subject_assocs:
        for a in subject_assocs:
            obj_ref = URIRef(instance_abbrv + ":agent-relationship-lod/" + str(a.id) + "/")
            property_name = camelcase_lower(a.association_type.label)
            ref_relationship = URIRef(instance_abbrv + ":agent-relationship-type-lod/" + property_name)
            store.add((ref, ref_relationship, obj_ref))
    if object_assocs:
        for a in object_assocs:
            subj_ref = URIRef(instance_abbrv + ":agent-relationship-inv-lod/" + str(a.id) + "/")
            inv_property_name = camelcase_lower(a.association_type.inverse_label)
            inv_ref_relationship = URIRef(instance_abbrv + ":agent-relationship-type-lod/" + inv_property_name)
            store.add((ref, inv_ref_relationship, subj_ref))

    ser = store.serialize(format='json-ld', context=context, indent=4)
    return HttpResponse(ser, content_type='application/json')  

#following method supplied by Niklas at rdflib-jsonld support to get the desired output for nested rdf inputs for rdflib
项目:smartcontainers    作者:crcresearch    | 项目源码 | 文件源码
def cli(ctx):
    """Smartcontainers for software and data preservation.
    Smartcontainers provides a mechanism to add metadata to Docker
    containers as a JSON-LD label. The metadata is contextualized using
    W3C recommended PROV-O and ORCID IDs to capture provenance information.
    The sc command wraps the docker commandline interface and passes any
    docker command line parameters through to docker. Any command that changes
    the state of the container is recorded in a prov graph and attached to the resultant
    image.
    """

    # Ignore config loading if we intend to create an orcid config
    if ctx.args[0] == "config" and ctx.args[1] == "orcid":
        return

    Success = False
    while not Success:
        result = config_file.read_config()
        if 'Configuration does not exist.' in result:
            print("User configuration needs to be initialized")
            selected = None
            while not selected:
                try:
                    selected = click.prompt('Do you have an ORCID profile (Y/N)')
                    if selected.lower() == 'y' or selected.lower() == 'yes':
                        config_by_search()
                        continue

                    if selected.lower() == 'n' or selected.lower() == 'no':
                        print("Please provide some basic information:")
                        query = {
                            'first_name': click.prompt(
                                'Please enter a first name', default='',
                                show_default=False
                            ),
                            'last_name': click.prompt(
                                'Please enter a last name', default='',
                                show_default=False
                            )
                        }
                        dockerUseruuid = str(uuid.uuid4())
                        UUIDNS = Namespace("urn:uuid:")
                        config_file.graph.bind("foaf", FOAF)
                        config_file.graph.add( ( UUIDNS[dockerUseruuid], FOAF.givenName, Literal(query['first_name']) ) )
                        config_file.graph.add( ( UUIDNS[dockerUseruuid], FOAF.familyName, Literal(query['last_name']) ) )

                        config_file.config_obj = config_file.graph.serialize(format='turtle')
                        config_file.write_config()
                except KeyError:
                    print('That is not a valid selection.  Please try again.\n')
        else:
            Success = True
            graph = config_file.graph
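
A small, assumption-labelled sketch of the urn:uuid pattern the CLI uses above: indexing Namespace("urn:uuid:") with a freshly generated UUID yields a subject URI for the FOAF triples.

# Sketch (names and values are illustrative, not from smartcontainers).
import uuid

from rdflib import Graph, Literal, Namespace
from rdflib.namespace import FOAF

UUIDNS = Namespace("urn:uuid:")
g = Graph()
g.bind("foaf", FOAF)

user_uuid = str(uuid.uuid4())
g.add((UUIDNS[user_uuid], FOAF.givenName, Literal("Ada")))
g.add((UUIDNS[user_uuid], FOAF.familyName, Literal("Lovelace")))
print(g.serialize(format="turtle"))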
项目:smartcontainers    作者:crcresearch    | 项目源码 | 文件源码
def build(self):

        ds = self.graph
        self.context = {"prov": "http://www.w3.org/ns/prov#",
                   "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                   "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
                   "xsd": "http://www.w3.org/2001/XMLSchema#",
                   "dc": "http://purl.org/dc/terms"}

        # Define some namespaces
        PROV = Namespace("http://www.w3.org/ns/prov#")
        ORE = Namespace("http://www.openarchives.org/ore/terms/")
        OWL = Namespace("http://www.w3.org/2002/07/owl#")
        DC = Namespace("http://purl.org/dc/terms/")
        UUIDNS = Namespace("urn:uuid:")
        DOCKER = Namespace("http://w3id.org/daspos/docker#")
        # W3C namespace:
        POSIX = Namespace("http://www.w3.org/ns/posix/stat#")
        ACL = Namespace("http://www.w3.org/ns/auth/acl#")

        # DASPOS namespaces
        SC = Namespace("https://w3id.org/daspos/smartcontainers#")
        CA = Namespace("https://w3id.org/daspos/computationalactivity#")
        CE = Namespace("https://w3id.org/daspos/computationalenvironment#")

        # Need to handle DOI
        # http://bitwacker.com/2010/02/04/dois-uris-and-cool-resolution/

        ds.bind("prov", PROV)
        ds.bind("ore", ORE)
        ds.bind("owl", OWL)
        ds.bind("dc", DC)
        ds.bind("uuidns", UUIDNS)
        ds.bind("docker", DOCKER)
        ds.bind("posix", POSIX)
        ds.bind("acl", ACL)
        ds.bind("sc", SC)
        ds.bind("ca", CA)
        ds.bind("ce", CE)
        ds.bind("foaf", FOAF)

        # Build agent metadata
        self.build_agent(ds)
        self.build_entity(ds)
        self.build_activity(ds)
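
A hedged sketch of how the namespaces bound in build() are typically used afterwards; the specific triples are illustrative, not necessarily what build_agent / build_entity / build_activity emit.

# Sketch only: RDF.type and the PROV terms are real; the image IRI and
# the docker:Image class are assumptions made for illustration.
from rdflib import Graph, Namespace, URIRef
from rdflib.namespace import RDF

PROV = Namespace("http://www.w3.org/ns/prov#")
DOCKER = Namespace("http://w3id.org/daspos/docker#")

ds = Graph()
ds.bind("prov", PROV)
ds.bind("docker", DOCKER)

image = URIRef("http://example.org/image/abc123")
ds.add((image, RDF.type, PROV.Entity))
ds.add((image, RDF.type, DOCKER.Image))  # assumed class, for illustration
print(ds.serialize(format="turtle"))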
项目:programming-the-semantic-web    作者:utecht    | 项目源码 | 文件源码
def servedata(environ):
    #Additional ns' for the queries 
    ourserver = "http://" + server_addr + ":" + str(server_port) + "/"
    MBMSG = Namespace(ourserver + "messages/")
    MBUSR = Namespace(ourserver + "users/")

    path = environ["PATH_INFO"]

    resp = {"status":"200 OK"}  
    resp["headers"] = [("Content-type", "application/rdf+xml")]

    if environ["PATH_INFO"].find("users") != -1:
        #user request query
        userid = "mbusr:" + path[path.rindex("/") + 1:]
        query = """CONSTRUCT { 
                       """ + userid + """ sioc:creator_of ?msg .
               ?msg dc:title ?title .
                       """ + userid + """ foaf:name ?name .
           } WHERE { 
               ?msg sioc:has_creator """ + userid + """ .
                       ?msg dc:title ?title .
                       """ + userid + """ foaf:name ?name .
                   } """
    else:
        #message request query
        msgid = "mbmsg:" + path[path.rindex("/") + 1:]
        query = """CONSTRUCT {
                        """ + msgid + """ dc:title ?title .
                        """ + msgid + """ sioc:has_creator ?user .
                        """ + msgid + """ sioc:content ?content .
                    } WHERE { 
                        """ + msgid + """ dc:title ?title .
                        """ + msgid + """ sioc:has_creator ?user .
                        """ + msgid + """ sioc:content ?content .  
            } """

    bindingdict = {'sioc': SIOC,
                   'dc': DC,
                   'dcterms': DCTERMS,
                   'foaf': FOAF,
                   'rdfs': RDFS,
                   'mb': MB,
                   'mbmsg': MBMSG,
                   'mbusr': MBUSR}

    resp["body"] = [sg.query(query, initNs=bindingdict).serialize(format='xml')]

    return resp
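
A placeholder sketch of the initNs mechanism used in the query call above: the dict maps prefix strings to Namespace objects, so the CONSTRUCT text can use prefixed names without PREFIX declarations. The graph contents and prefixes below are invented.

# Sketch with placeholder data: initNs supplies the prefix bindings
# that the SPARQL text relies on.
from rdflib import Graph, Literal, Namespace
from rdflib.namespace import FOAF

MBUSR = Namespace("http://example.org/users/")
g = Graph()
g.add((MBUSR["u7"], FOAF.name, Literal("Sample User")))

query = """CONSTRUCT { mbusr:u7 foaf:name ?name . }
           WHERE { mbusr:u7 foaf:name ?name . }"""
result = g.query(query, initNs={"mbusr": MBUSR, "foaf": FOAF})
print(result.serialize(format="xml"))  # CONSTRUCT result serialized as RDF/XML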
项目:bibcat    作者:KnowledgeLinks    | 项目源码 | 文件源码
def __predicate_object_map__(self, map_iri):
        """Iterates through rr:predicateObjectMaps for this TripleMap
        creating a SimpleNamespace for each triple map and assigning the
        constant, template, parentTripleMap, reference as properties.

        Args:

        -----
                map_iri:  rdflib.URIRef, TripleMap IRI

        Returns:

        --------
                list:  List of predicate_object Namespace objects
        """
        pred_obj_maps = []
        for pred_obj_map_bnode in self.rml.objects(
                subject=map_iri,
                predicate=NS_MGR.rr.predicateObjectMap):
            pred_obj_map = SimpleNamespace()
            pred_obj_map.predicate = self.rml.value(
                subject=pred_obj_map_bnode,
                predicate=NS_MGR.rr.predicate)
            obj_map_bnode = self.rml.value(
                subject=pred_obj_map_bnode,
                predicate=NS_MGR.rr.objectMap)
            if obj_map_bnode is None:
                continue
            pred_obj_map.constant = self.rml.value(
                subject=obj_map_bnode,
                predicate=NS_MGR.rr.constant)
            pred_obj_map.template = self.rml.value(
                subject=obj_map_bnode,
                predicate=NS_MGR.rr.template)
            pred_obj_map.parentTriplesMap = self.rml.value(
                subject=obj_map_bnode,
                predicate=NS_MGR.rr.parentTriplesMap)
            if pred_obj_map.parentTriplesMap is not None:
                self.parents.add(str(pred_obj_map.parentTriplesMap))
            pred_obj_map.reference = self.rml.value(
                subject=obj_map_bnode,
                predicate=NS_MGR.rr.reference)
            pred_obj_map.datatype = self.rml.value(
                subject=obj_map_bnode,
                predicate=NS_MGR.rr.datatype)
            pred_obj_map.query = self.rml.value(
                subject=obj_map_bnode,
                predicate=NS_MGR.rml.query)
            # BIBCAT Extensions
            pred_obj_map.delimiters = []
            for obj in self.rml.objects(subject=obj_map_bnode,
                                        predicate=NS_MGR.kds.delimiter):
                pred_obj_map.delimiters.append(obj)
            pred_obj_maps.append(pred_obj_map)
        return pred_obj_maps
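
A hedged illustration of the lookup pattern used above, i.e. graph.value() and graph.objects() with Namespace-derived predicates. The rr: namespace is the real R2RML vocabulary; the tiny mapping document is made up.

# Sketch: reading an rr:predicateObjectMap the way the method above does.
# The mapping data below is invented for illustration.
from rdflib import Graph, Namespace

RR = Namespace("http://www.w3.org/ns/r2rml#")
rml = Graph()
rml.parse(data="""
@prefix rr: <http://www.w3.org/ns/r2rml#> .
@prefix ex: <http://example.org/> .
ex:Map1 rr:predicateObjectMap [
    rr:predicate ex:title ;
    rr:objectMap [ rr:reference "title" ]
] .
""", format="turtle")

map_iri = Namespace("http://example.org/")["Map1"]
for po_bnode in rml.objects(subject=map_iri, predicate=RR.predicateObjectMap):
    predicate = rml.value(subject=po_bnode, predicate=RR.predicate)
    obj_map = rml.value(subject=po_bnode, predicate=RR.objectMap)
    reference = rml.value(subject=obj_map, predicate=RR.reference)
    print(predicate, reference)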
项目:Meiji    作者:GiovanniBalestrieri    | 项目源码 | 文件源码
def __init__( self, document, graph, base = None, vocab_expansion = False, vocab_cache = True  ) :
        """
        @param graph: an RDF graph; an RDFLib Graph
        @type graph: RDFLib Graph
        @param document: top of the DOM tree, as returned by the HTML5 parser
        @keyword base: the base of the Dom tree, either set from the outside or via a @base element
        @keyword vocab_expansion: whether vocab expansion should be performed or not
        @type vocab_expansion: Boolean
        @keyword vocab_cache: if vocabulary expansion is done, then perform caching of the vocabulary data
        @type vocab_cache: Boolean
        """
        Microdata.__init__(self, document, base)
        self.vocab_expansion   = vocab_expansion
        self.vocab_cache       = vocab_cache
        self.graph             = graph
        self.ns_md             = Namespace( MD_VOCAB )
        self.graph.bind( "md",MD_VOCAB )
        self.vocabularies_used = False

        # Get the vocabularies defined in the registry bound to proper names, if any...

        def _use_rdfa_context () :
            try :
                from ..pyRdfa.initialcontext import initial_context
            except :
                from pyRdfa.initialcontext import initial_context
            retval = {}
            vocabs = initial_context["http://www.w3.org/2011/rdfa-context/rdfa-1.1"].ns
            for prefix in list(vocabs.keys()) :
                uri = vocabs[prefix]
                if uri not in vocab_names and uri not in registry : retval[uri] = prefix
            return retval

        for vocab in registry :
            if vocab in vocab_names :
                self.graph.bind( vocab_names[vocab],vocab )
            else :
                hvocab = vocab + '#'
                if hvocab in vocab_names :
                    self.graph.bind( vocab_names[hvocab],hvocab )

        # Add the prefixes defined in the RDFa initial context to improve the outlook of the output
        # I put this into a try: except: in case the pyRdfa package is not available...
        try :
            try :
                from ..pyRdfa.initialcontext import initial_context
            except :
                from pyRdfa.initialcontext import initial_context
            vocabs = initial_context["http://www.w3.org/2011/rdfa-context/rdfa-1.1"].ns
            for prefix in list(vocabs.keys()) :
                uri = vocabs[prefix]
                if uri not in registry :
                    # if it is in the registry, then it may have needed some special microdata massage...
                    self.graph.bind( prefix,uri )
        except :
            pass
项目:prophet    作者:MKLab-ITI    | 项目源码 | 文件源码
def __init__( self, document, graph, base = None, vocab_expansion = False, vocab_cache = True  ) :
        """
        @param graph: an RDF graph; an RDFLib Graph
        @type graph: RDFLib Graph
        @param document: top of the DOM tree, as returned by the HTML5 parser
        @keyword base: the base of the Dom tree, either set from the outside or via a @base element
        @keyword vocab_expansion: whether vocab expansion should be performed or not
        @type vocab_expansion: Boolean
        @keyword vocab_cache: if vocabulary expansion is done, then perform caching of the vocabulary data
        @type vocab_cache: Boolean
        """
        Microdata.__init__(self, document, base)
        self.vocab_expansion   = vocab_expansion
        self.vocab_cache       = vocab_cache
        self.graph             = graph
        self.ns_md             = Namespace( MD_VOCAB )
        self.graph.bind( "md",MD_VOCAB )
        self.vocabularies_used = False

        # Get the vocabularies defined in the registry bound to proper names, if any...

        def _use_rdfa_context () :
            try :
                from ..pyRdfa.initialcontext import initial_context
            except :
                from pyRdfa.initialcontext import initial_context
            retval = {}
            vocabs = initial_context["http://www.w3.org/2011/rdfa-context/rdfa-1.1"].ns
            for prefix in list(vocabs.keys()) :
                uri = vocabs[prefix]                
                if uri not in vocab_names and uri not in registry : retval[uri] = prefix
            return retval

        for vocab in registry :
            if vocab in vocab_names :
                self.graph.bind( vocab_names[vocab],vocab )
            else :
                hvocab = vocab + '#'
                if hvocab in vocab_names :
                    self.graph.bind( vocab_names[hvocab],hvocab )

        # Add the prefixes defined in the RDFa initial context to improve the outlook of the output
        # I put this into a try: except: in case the pyRdfa package is not available...
        try :
            try :
                from ..pyRdfa.initialcontext import initial_context
            except :
                from pyRdfa.initialcontext import initial_context
            vocabs = initial_context["http://www.w3.org/2011/rdfa-context/rdfa-1.1"].ns
            for prefix in list(vocabs.keys()) :
                uri = vocabs[prefix]
                if uri not in registry :
                    # if it is in the registry, then it may have needed some special microdata massage...
                    self.graph.bind( prefix,uri )
        except :
            pass