Python nltk module: RegexpTokenizer() usage examples

The following 10 code examples, extracted from open-source Python projects, illustrate how to use nltk.RegexpTokenizer().
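
Before the project snippets, here is a minimal, self-contained sketch of the basic API (the sample sentence is made up): the tokenizer returns every substring that matches the pattern, and span_tokenize yields (start, end) character offsets instead of the token strings.

from nltk.tokenize import RegexpTokenizer  # also available as nltk.RegexpTokenizer

text = "RegexpTokenizer splits text with a regular expression."
tokenizer = RegexpTokenizer(r'\w+')         # keep runs of word characters, drop punctuation and whitespace
print(tokenizer.tokenize(text))             # ['RegexpTokenizer', 'splits', 'text', 'with', 'a', 'regular', 'expression']
print(list(tokenizer.span_tokenize(text)))  # [(0, 15), (16, 22), ...] -- (start, end) offsets of each token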

Project: text-matcher    Author: JonathanReeve
def getTokens(self, removeStopwords=True):
        """ Tokenizes the text, breaking it up into words, removing punctuation. """
        tokenizer = nltk.RegexpTokenizer(r"[a-zA-Z]\w+'?\w*") # A custom regex tokenizer.
        spans = list(tokenizer.span_tokenize(self.text))
        # Record the end offset of the last token; used as the effective length of the text
        self.length = spans[-1][-1]
        tokens = tokenizer.tokenize(self.text)
        tokens = [ token.lower() for token in tokens ] # make them lowercase
        stemmer = LancasterStemmer()
        tokens = [ stemmer.stem(token) for token in tokens ]
        if not removeStopwords:
            self.spans = spans
            return tokens
        tokenSpans = list(zip(tokens, spans)) # zip it up
        stopwords = nltk.corpus.stopwords.words('english') # get stopwords
        tokenSpans = [ token for token in tokenSpans if token[0] not in stopwords ] # remove stopwords from zip
        self.spans = [ x[1] for x in tokenSpans ] # unzip; get spans
        return [ x[0] for x in tokenSpans ] # unzip; get tokens
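
The same pipeline can be exercised outside the class; a minimal standalone sketch with made-up text (assumes the NLTK stopwords corpus is installed, e.g. via nltk.download('stopwords')):

import nltk
from nltk.stem import LancasterStemmer

text = "The quick brown foxes were jumping over the lazy dogs."
tokenizer = nltk.RegexpTokenizer(r"[a-zA-Z]\w+'?\w*")      # same pattern as above
tokens = [t.lower() for t in tokenizer.tokenize(text)]     # lowercase word tokens
stems = [LancasterStemmer().stem(t) for t in tokens]       # aggressively stemmed tokens
stopwords = nltk.corpus.stopwords.words('english')
print([s for s in stems if s not in stopwords])            # stems that are not English stopwords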
Project: PyTrafficCar    Author: liyuming1978
def load_data():
    global N, words

    raw = list(word 
            for fileid in corpus.fileids()
            for word in corpus.words(fileid))
    words = list(token for token in RegexpTokenizer(r'\w+').tokenize(' '.join(raw)))[100:1000]
    tokens = set(words)
    tokens_l = list(tokens)
    N = len(tokens)
    print('Corpus size: {} words'.format(N))

    step = 4
    data = []
    for gram in ngrams(words, step):
        w1, w2, w3, pred = gram
        V = Vol(1, 1, N, 0.0)
        V.w[tokens_l.index(w1)] = 1
        V.w[tokens_l.index(w2)] = 1
        V.w[tokens_l.index(w3)] = 1
        label = tokens_l.index(pred)
        data.append((V, label))

    return data
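
load_data builds, for each 4-gram, a bag-of-words input in which the three context words are switched on and the index of the fourth word is the label. A sketch of the same encoding using a plain Python list instead of the library's Vol container (the toy vocabulary and corpus are made up):

from nltk.util import ngrams

tokens_l = ['the', 'cat', 'sat', 'on', 'mat']        # toy vocabulary
words = ['the', 'cat', 'sat', 'on', 'the', 'mat']    # toy corpus
data = []
for w1, w2, w3, pred in ngrams(words, 4):
    x = [0.0] * len(tokens_l)                        # stand-in for Vol(1, 1, N, 0.0)
    for w in (w1, w2, w3):
        x[tokens_l.index(w)] = 1                     # switch on the three context words
    data.append((x, tokens_l.index(pred)))           # label = index of the word to predict
print(data[0])                                       # ([1, 1, 1, 0.0, 0.0], 3)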
Project: PyTrafficCar    Author: liyuming1978
def test(): 
    gt = GetTweets()
    documents = gt.get_hashtag('ferguson', count=20)
    documents += gt.get_hashtag('police', count=21)
    print('Query:', documents[-1])

    tokenizer = RegexpTokenizer(r'\w+')
    vols = []
    for doc in documents:
        samples = []
        for token in tokenizer.tokenize(doc):
            word = token.lower()
            if word not in ENGLISH_STOP_WORDS and word not in punctuation:
                samples.append(word)
        vols.append(volumize(FreqDist(samples)))

    vectors = [ doc_code(v) for v in vols[:-1] ]
    query_vec = doc_code(vols[-1])

    sims = [ cos(v, query_vec) for v in vectors ]
    m = max(sims)
    print m, documents[sims.index(m)]
Project: geocoder-ie    Author: devgateway
def __call__(self, doc):
        return [self.wnl.lemmatize(t) for t in RegexpTokenizer(r'[A-z]+').tokenize(doc)]
Project: geocoder-ie    Author: devgateway
def __call__(self, doc):
        return [self.stm.stem(t) for t in RegexpTokenizer(r'[A-z]+').tokenize(doc)]
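
Note that the character class [A-z] used in the two snippets above also matches the ASCII characters that sit between 'Z' and 'a' ('[', '\', ']', '^', '_' and the backtick); if only letters are wanted, [A-Za-z]+ is the stricter pattern. A small illustration (the sample string is made up):

from nltk.tokenize import RegexpTokenizer

sample = "foo_bar [baz]"
print(RegexpTokenizer(r'[A-z]+').tokenize(sample))     # ['foo_bar', '[baz]'] -- underscores and brackets slip through
print(RegexpTokenizer(r'[A-Za-z]+').tokenize(sample))  # ['foo', 'bar', 'baz'] -- letters only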
Project: newsrecommender    Author: Newsrecommender
def __init__(self): 
        self.tokenizer = RegexpTokenizer(r'[a-zA-Z_]+')
        #self.p_stem = PorterStemmer()
        self.p_stem = SnowballStemmer("english")
        self.news_sources = {"News": [
            "http://timesofindia.indiatimes.com/rssfeeds/5880659.cms",
            "http://www.economist.com/sections/science-technology/rss.xml",
        ]}
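
Only the constructor is shown here; a hypothetical call illustrating how the configured tokenizer and Snowball stemmer would typically be combined (the sample headline is made up):

from nltk.tokenize import RegexpTokenizer
from nltk.stem import SnowballStemmer

tokenizer = RegexpTokenizer(r'[a-zA-Z_]+')   # same pattern as in __init__
p_stem = SnowballStemmer("english")
headline = "Scientists are recommending new climate policies"
print([p_stem.stem(t) for t in tokenizer.tokenize(headline)])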
Project: PyTrafficCar    Author: liyuming1978
def test():
    global N, words, network

    print('In testing.')

    gettysburg = """Four score and seven years ago our fathers brought forth on this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure. We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper that we should do this. But, in a larger sense, we can not dedicate -- we can not consecrate -- we can not hallow -- this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here. It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so nobly advanced. It is rather for us to be here dedicated to the great task remaining before us -- that from these honored dead we take increased devotion to that cause for which they gave the last full measure of devotion -- that we here highly resolve that these dead shall not have died in vain -- that this nation, under God, shall have a new birth of freedom -- and that government of the people, by the people, for the people, shall not perish from the earth."""
    tokenizer = RegexpTokenizer(r'\w+')
    gettysburg_tokens = tokenizer.tokenize(gettysburg) 

    samples = []
    for token in gettysburg_tokens:
        word = token.lower()
        if word not in ENGLISH_STOP_WORDS and word not in punctuation:
            samples.append(word)

    dist = FreqDist(samples)
    V = Vol(1, 1, N, 0.0)
    for i, word in enumerate(words):
        V.w[i] = dist.freq(word)

    pred = list(network.forward(V).w)
    remaining_words = list(words)  # keep a parallel copy so word indices stay aligned after deletions
    topics = []
    while len(topics) != 5:
        max_act = max(pred)
        topic_idx = pred.index(max_act)
        topic = remaining_words[topic_idx]

        if topic in gettysburg_tokens:
            topics.append(topic)

        del pred[topic_idx]
        del remaining_words[topic_idx]

    print('Topics of the Gettysburg Address:')
    print(topics)
Project: neural-turkish-morphological-disambiguator    Author: onurgu
def tokenize(line):
    tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+')
    return tokenizer.tokenize(line)
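
For reference, a quick check of what this pattern produces on a sample line (the sentence is only an illustration): word runs, dollar amounts, and any remaining non-space chunks each become a token.

from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+')
print(tokenizer.tokenize("Good muffins cost $3.88 in New York."))
# ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.']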
Project: stance_detection    Author: StanceDetection
def gen_jaccard_sims(self, dataset, body_ids, stances):
        # currently assumes both the body and the headline are non-empty.
        punc_rem_tokenizer = nltk.RegexpTokenizer(r'\w+')

        avg_sims = []
        max_sims = []

        parsed_bodies_dict = {}
        # for body_id, body in self.dataset.articles.iteritems():
        for body_id in body_ids:
            body = dataset.articles[body_id].lower()
            sents = nltk.sent_tokenize(body)
            sents = self._remove_punctuation(sents)
            sents = self._word_tokenize(sents)
            parsed_bodies_dict[body_id] = sents # cache parsed body

        for st in stances:
            headline = st['Headline'].lower()
            headline = headline.translate(self.REMOVE_PUNC_MAP)
            headline = nltk.word_tokenize(headline)
            body_id = st['Body ID']
            sents = parsed_bodies_dict[body_id]

            jacc_sims = []
            for sent in sents:
                if len(sent) < 1:
                    continue
                hs = set(headline)
                ss = set(sent)
                jacc_sim = len(hs.intersection(ss)) / float(len(hs.union(ss)))
                jacc_sims.append(jacc_sim)

            max_sim = max(jacc_sims)
            avg_sim = sum(jacc_sims) / float(len(jacc_sims))

            max_sims.append(max_sim)
            avg_sims.append(avg_sim)

        return avg_sims, max_sims
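
The value computed inside the inner loop is the plain Jaccard index over the two token sets, |H ∩ S| / |H ∪ S|. A standalone sketch with made-up token lists:

headline = ['police', 'shooting', 'in', 'ferguson']
sentence = ['a', 'shooting', 'was', 'reported', 'in', 'ferguson']
hs, ss = set(headline), set(sentence)
print(len(hs & ss) / float(len(hs | ss)))  # 3 shared tokens / 7 distinct tokens ~= 0.43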
Project: stance_detection    Author: StanceDetection
def _gen_jaccard_sims(self, bodies_dict, stances):
        # currently assumes both the body and the headline are non-empty.
        punc_rem_tokenizer = nltk.RegexpTokenizer(r'\w+')

        avg_sims = []
        max_sims = []

        parsed_bodies_dict = {}
        for body_id, body in bodies_dict.items():
            sents = nltk.sent_tokenize(body)
            sents = self._remove_punctuation(sents)
            sents = self._word_tokenize(sents)
            parsed_bodies_dict[body_id] = sents # cache parsed body


        for st in stances:
            headline = st['Headline']
            headline = headline.translate(self.REMOVE_PUNC_MAP)
            headline = nltk.word_tokenize(headline)
            sents = parsed_bodies_dict[st['Body ID']]  # look up the cached sentences for this stance's body

            jacc_sims = []
            for sent in sents:
                if len(sent) < 1:
                    continue
                # extend shorter word list so that both are the same length
                len_diff = len(headline) - len(sent)
                headline_cpy = headline
                sent_cpy = sent

                if len_diff < 0: # sent longer than headline
                    headline_cpy = headline_cpy + ([headline_cpy[-1]] * abs(len_diff))
                elif len_diff > 0: # headline longer than sent
                    sent_cpy = sent_cpy + ([sent_cpy[-1]] * abs(len_diff))

                jacc_sims.append(jaccard_similarity_score(headline_cpy, sent_cpy))

            avg_sim = self._threshold_parser((sum(jacc_sims) / len(jacc_sims)), [0.2])
            max_sim = self._threshold_parser(max(jacc_sims), [0.2])
            avg_sims.append(avg_sim)
            max_sims.append(max_sim)

        return avg_sims, max_sims