Requires Python 2.7 or 3.4+.
pip install -U nltk
import nltk
nltk.download()
# Import the Brown Corpus
from nltk.corpus import brown
brown.words()
After downloading, if NLTK cannot find the data, set the NLTK_DATA environment variable to the directory that contains it.
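Alternatively, the data directory can be added to NLTK's search path from within Python. A minimal sketch (the `/path/to/nltk_data` directory is only a placeholder for wherever you unpacked the data):

```python
import nltk

# Add the directory containing the downloaded corpora to NLTK's search path.
# "/path/to/nltk_data" is a placeholder; replace it with your own data directory.
nltk.data.path.append("/path/to/nltk_data")

# NLTK should now be able to locate the data, e.g. the Brown Corpus:
from nltk.corpus import brown
print(brown.words()[:10])
```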
from nltk.book import *
# Print the contexts in which the given word appears in the text
text1.concordance('monstrous')
# Print other words that appear in the same contexts as the given word
text1.similar('monstrous')
# Takes a list of words and prints the contexts shared by all of them
text1.common_contexts(['monstrous', 'gamesome'])
# Plot where each word occurs across the text
text4.dispersion_plot(['freedom', 'America'])
# Return the number of times the word occurs in the text
text1.count('monstrous')
# Print frequent bigram collocations in the text
text1.collocations()
import nltk
from nltk.book import *
'''
Create a FreqDist object; FreqDist inherits from dict.
Its keys are words and its values are the words' total occurrence counts.
The FreqDist constructor accepts any list.
'''
fdist1 = FreqDist(text1)
# Plot the most frequent words
fdist1.plot(10)
# Print the 15 most frequent items as a table
fdist1.tabulate(15)
# Return a list of the 15 most frequent items
# [(',', 18713), ('the', 13721), ('.', 6862), ('of', 6536), ('and', 6024), ...
fdist1.most_common(15)
# Return the hapaxes, i.e. items that occur only once
# ['whalin', 'comforts', 'footmanism', 'peacefulness', 'incorruptible', ...]
fdist1.hapaxes()
# Return the most frequent item
fdist1.max()
# Words in the text longer than 7 characters that occur more than 7 times
words = set(text1)
long_words = [w for w in words if len(w) > 7 and fdist1[w] > 7]
print(sorted(long_words))
Stanford's Chinese NLP tools support word segmentation as well as POS tagging, named entity recognition, and syntactic parsing.
Download the latest Stanford Segmenter jar.
Download SLF4J (the segmenter depends on it).
# -*- coding:utf-8 -*-
from nltk.tokenize.stanford_segmenter import StanfordSegmenter
segmenter = StanfordSegmenter(
    path_to_jar="stanford-segmenter-3.7.0.jar",
    path_to_slf4j="slf4j-simple-1.7.25.jar",
    path_to_sihan_corpora_dict="./data",
    path_to_model="./data/pku.gz",
    path_to_dict="./data/dict-chris6.ser.gz"
)
sentence = u"这是斯坦福中文分词器测试"
# Expected output: 这 是 斯坦福 中文 分词器 测试
print(segmenter.segment(sentence))
print(segmenter.segment_file("test.simp.utf8"))
import nltk
# Gutenberg corpus; gutenberg, webtext and inaugural are PlaintextCorpusReader instances
from nltk.corpus import gutenberg
#['austen-emma.txt', 'austen-persuasion.txt', 'austen-sense.txt', '...
# Return the list of file identifiers in the corpus
gutenberg.fileids()
# Takes one or more file identifiers and returns the list of words in those texts
#['[', 'Emma', 'by', 'Jane', 'Austen', '1816', ']', ...]
emma = gutenberg.words("austen-emma.txt")
# Takes one or more file identifiers and returns the raw text as a string
#'[Emma by Jane Austen 1816]\n\nVOLUME I\n\nCHAPTER I\n\n\nEmma Woodhouse, ...'
emma_str = gutenberg.raw("austen-emma.txt")
# Takes one or more file identifiers and returns the list of sentences in those texts
emma_sents = gutenberg.sents("austen-emma.txt")
print(emma_sents)
# Web text corpus
#['firefox.txt', 'grail.txt', 'overheard.txt', 'pirates.txt', 'singles.txt', 'wine.txt']
from nltk.corpus import webtext
print(webtext.fileids())
# Inaugural address corpus
from nltk.corpus import inaugural
print(inaugural.fileids())
# Instant-messaging chat corpus; nps_chat is an NPSChatCorpusReader object
from nltk.corpus import nps_chat
print(nps_chat.fileids())
# Returns a list of posts, where each post is itself a list of words
chat_room = nps_chat.posts('10-19-30s_705posts.xml')
print(chat_room)
# Brown corpus; brown is a CategorizedTaggedCorpusReader instance
from nltk.corpus import brown
# Return the category labels in the corpus
print(brown.categories())
# Takes one or more category labels and returns the matching file identifiers
print(brown.fileids(['news', 'lore']))
# Takes file identifiers or category labels and returns the list of words
ca02 = brown.words(fileids='ca02')
print('ca02: ', ca02)
# Reuters corpus; reuters is a categorized corpus reader as well
from nltk.corpus import reuters
print(reuters.categories())
# ConditionalFreqDist inherits from dict; a conditional frequency distribution is a collection of frequency distributions, one per condition
import nltk
from nltk.corpus import brown
# A conditional frequency distribution is built from a list of pairs of the form (condition, event); here the condition is the genre and the event is the word.
pairs = [(genre, word) for genre in brown.categories() for word in brown.words(categories=genre)]
cfd = nltk.ConditionalFreqDist(pairs)
# Return the list of conditions
print(cfd.conditions())
# Print a table of the conditional frequency distribution for the given conditions and samples
genres = ['news', 'religion', 'hobbies', 'science_fiction', 'romance', 'humor']
modals = ['can', 'could', 'may', 'might', 'must', 'will']
cfd.tabulate(conditions=genres, samples=modals)
# Plot the conditional frequency distribution for the given conditions and samples
cfd.plot(conditions=genres, samples=modals)
# Generate all bigrams from the given word list
sent = ['I', 'am', 'a', 'good', 'man']
#[('I', 'am'), ('am', 'a'), ('a', 'good'), ('good', 'man')]
print(list(nltk.bigrams(sent)))
# Build a conditional frequency distribution over the bigrams to get the frequency distribution of following words
text = brown.words(categories='news')
bigrams_words = nltk.bigrams(text)
cfd = nltk.ConditionalFreqDist(bigrams_words)
fd = cfd['can']
fd.plot(10)
import nltk
# Part-of-speech tagger
# Tokenize the sentence into a list of words
words = nltk.word_tokenize('And now for something completely different')
#['And', 'now', 'for', 'something', 'completely', 'different']
print(words)
# POS-tag the word list and return a list of (word, tag) pairs
word_tag = nltk.pos_tag(words)
#[('And', 'CC'), ('now', 'RB'), ('for', 'IN'), ('something', 'NN'), ('completely', 'RB'), ('different', 'JJ')]
print(word_tag)
# Tagged corpora
# brown can be treated as a CategorizedTaggedCorpusReader instance.
from nltk.corpus import brown
words_tag = brown.tagged_words(categories='news')
#[('The', 'AT'), ('Fulton', 'NP-TL'), ('County', 'NN-TL'), ('Grand', 'JJ-TL'),...
print(words_tag[:10])
# Takes file identifiers or category labels and returns the POS-tagged sentences; each sentence is a list of (word, tag) pairs
tagged_sents = brown.tagged_sents(categories='news')
print(tagged_sents)
# The sinica_treebank Chinese corpus uses Traditional Chinese and is also POS-tagged
# sinica_treebank can be treated as a SinicaTreebankCorpusReader instance.
from nltk.corpus import sinica_treebank
#['parsed']
print(sinica_treebank.fileids())
# Return the word list of the text
words = sinica_treebank.words('parsed')
print(words[:40])
# Return the POS-tagged word list of the text
words_tag = sinica_treebank.tagged_words('parsed')
print(words_tag[:40])
# Look at the most common POS tags
words_tag = sinica_treebank.tagged_words('parsed')
tag_fd = nltk.FreqDist(tag for (word, tag) in words_tag)
tag_fd.tabulate(5)
import nltk
raw = "You are a good man, but i don't love you!"
tokens = nltk.word_tokenize(raw)
# The constructor takes a tag string and creates a default tagger
default_tagger = nltk.DefaultTagger('NN')
# Tag the word list and return the tagged words
tagged_words = default_tagger.tag(tokens)
print(tagged_words)
from nltk.corpus import brown
# Evaluate the tagger on already-tagged sentences; returns an accuracy between 0 and 1.0
tagged_sents = brown.tagged_sents(categories='news')
#0.13089484257215028
print(default_tagger.evaluate(tagged_sents))
# Lookup tagger
# Build a frequency distribution over the news text to find its 100 most common words
fd = nltk.FreqDist(brown.words(categories='news'))
most_common_pairs = fd.most_common(100)
most_common_words = [i[0] for i in most_common_pairs]
# Build a conditional frequency distribution over the tagged news text so we can find the most frequent tag for any given word
cfd = nltk.ConditionalFreqDist(brown.tagged_words(categories='news'))
# Find the most frequent tag for each of the 100 most common words, giving a word-to-tag dict; UnigramTagger and DefaultTagger both inherit from TaggerI
likely_tags = dict((word, cfd[word].max()) for word in most_common_words)
# Use the word-to-tag dict as the model for a lookup tagger
baseline_tagger = nltk.UnigramTagger(model=likely_tags)
tagged_sents = brown.tagged_sents(categories='news')
# 0.45578495136941344
print(baseline_tagger.evaluate(tagged_sents))
# Many words are assigned a None tag; we can give them a default tag instead
raw = "You are a good man, but i don't love you!"
tokens = nltk.word_tokenize(raw)
# [('You', None), ('are', 'BER'), ('a', 'AT'), ('good', None), (...
print(baseline_tagger.tag(tokens))
# Use a default tagger as the backoff
baseline_tagger2 = nltk.UnigramTagger(model=likely_tags, backoff=nltk.DefaultTagger('NN'))
tagged_sents = brown.tagged_sents(categories='news')
# 0.5817769556656125
print(baseline_tagger2.evaluate(tagged_sents))
# Increasing the number of words improves accuracy further. Build a frequency distribution over the news text to find its 500 most common words
fd = nltk.FreqDist(brown.words(categories='news'))
most_common_pairs = fd.most_common(500)
most_common_words = [i[0] for i in most_common_pairs]
# Build a conditional frequency distribution over the tagged news text so we can find the most frequent tag for any given word
cfd = nltk.ConditionalFreqDist(brown.tagged_words(categories='news'))
# Find the most frequent tag for each of the 500 most common words
likely_tags = dict((word, cfd[word].max()) for word in most_common_words)
# Use the word-to-tag dict as the model for a lookup tagger
baseline_tagger = nltk.UnigramTagger(model=likely_tags, backoff=nltk.DefaultTagger('NN'))
tagged_sents = brown.tagged_sents(categories='news')
# 0.6789983491457326
print(baseline_tagger.evaluate(tagged_sents))
import nltk
from nltk.corpus import brown
tagged_sents = brown.tagged_sents(categories='news')
# Create a unigram tagger
unigram_tagger = nltk.UnigramTagger(train=tagged_sents)
# 0.9349006503968017
print(unigram_tagger.evaluate(tagged_sents))
# To check whether the tagger overfits, split the data into a training set (90%) and a test set (10%)
tagged_sents = brown.tagged_sents(categories='news')
size = int(len(tagged_sents) * 0.9)
train_sets = tagged_sents[:size]
test_sets = tagged_sents[size:]
# Create a unigram tagger
unigram_tagger = nltk.UnigramTagger(train=train_sets)
# 0.9353630649241612
print(unigram_tagger.evaluate(train_sets))
# 0.8115219774743347
print(unigram_tagger.evaluate(test_sets))
# Part of speech depends on the surrounding context
tagged_sents = brown.tagged_sents(categories='news')
size = int(len(tagged_sents) * 0.9)
train_sets = tagged_sents[:size]
test_sets = tagged_sents[size:]
# Create a bigram tagger
bigram_tagger = nltk.BigramTagger(train=train_sets)
# 0.7890434263872471
print(bigram_tagger.evaluate(train_sets))
# 0.10186384929731884
print(bigram_tagger.evaluate(test_sets))
A bigram tagger considers a word together with the tag of the preceding word. When it meets an unseen word it cannot tag it, and that failure then prevents the following words from being tagged as well, which is why the bigram tagger's accuracy on the test set is so low.
Combine the taggers as follows:
- Try tagging the word with the bigram tagger.
- If the bigram tagger cannot find a tag, fall back to the unigram tagger.
- If the unigram tagger cannot find a tag either, use the default tagger.
import nltk
from nltk.corpus import brown
# Split into training and test sets
tagged_sents = brown.tagged_sents(categories='news')
size = int(len(tagged_sents) * 0.9)
train_sets = tagged_sents[:size]
test_sets = tagged_sents[size:]
# Train the taggers and chain them together
t0 = nltk.DefaultTagger('NN')
t1 = nltk.UnigramTagger(train=train_sets, backoff=t0)
t2 = nltk.BigramTagger(train=train_sets, backoff=t1)
# Check the taggers' performance
# 0.9735641453364413
print(t2.evaluate(train_sets))
# 0.8459085019435861
print(t2.evaluate(test_sets))
import nltk
import random
from nltk.classify import apply_features
from nltk.corpus import PlaintextCorpusReader
names_corpus = PlaintextCorpusReader('./', ['female.txt', 'male.txt'])
all_names = names_corpus.words()
ch_freq = nltk.FreqDist(ch.lower() for name in all_names for ch in name)
ch_freq_most = ch_freq.most_common(1000)
ch_features = [ch for (ch, count) in ch_freq_most]
print(ch_freq_most)
def name_features(name):
    """
    Extract features from a name.
    :param name: the name
    :return: the name's feature dict
    """
    name_chs = set([ch.lower() for ch in name])
    features = {}
    for ch in ch_features:
        features['contain(%s)' % ch] = (ch in name_chs)
    return features

female_names = [(name, 'female') for name in names_corpus.words('female.txt')]
male_names = [(name, 'male') for name in names_corpus.words('male.txt')]
total_names = female_names + male_names
random.shuffle(total_names)
train_set_size = int(len(total_names) * 0.6)
train_names = total_names[:train_set_size]
test_names = total_names[train_set_size:]
train_set = apply_features(name_features, train_names, True)
test_set = apply_features(name_features, test_names, True)
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier, train_set))
print(nltk.classify.accuracy(classifier, test_set))
classifier.show_most_informative_features(20)
for (name, tag) in test_names:
    guess = classifier.classify(name_features(name))
    if guess != tag:
        print(tag, guess, name)
Chunking groups one or more consecutive words into a chunk.
Chunking is a basic technique used for entity recognition.
Noun-phrase chunking is also called NP-chunking. One of the most useful sources of information for NP-chunking is part-of-speech tags, so we usually POS-tag the text before chunking.
Symbol | Meaning | Example |
---|---|---|
S | sentence | the man walked |
NP | noun phrase | a dog |
VP | verb phrase | saw a park |
PP | prepositional phrase | with a telescope |
Det | determiner | the |
N | noun | dog |
V | verb | walked |
P | preposition | in |
import nltk
# Tokenize
text = "Lucy let down her long golden hair"
sentence = nltk.word_tokenize(text)
# POS tagging
sentence_tag = nltk.pos_tag(sentence)
print(sentence_tag)
# Define the chunk grammar
# NNP (proper noun), PRP$ (possessive pronoun)
# The first rule matches an optional determiner or possessive pronoun, zero or more adjectives, and then a noun
# The second rule matches one or more proper nouns
# $ is a special character in regular expressions, so it must be escaped as \$ to match PRP$
grammar = r"""
NP: {<DT|PRP\$>?<JJ>*<NN>}
    {<NNP>+}
"""
# Chunk the sentence
cp = nltk.RegexpParser(grammar)
tree = cp.parse(sentence_tag)
tree.draw()
import nltk
# Tokenize
text = "the little yellow dog barked at the cat"
sentence = nltk.word_tokenize(text)
# POS tagging
sentence_tag = nltk.pos_tag(sentence)
print(sentence_tag)
# Define the chink grammar
# The first rule chunks the entire sentence
# The second rule matches one or more past-tense verbs or prepositions
# A }{ pair marks whatever the pattern inside matches as a chink, i.e. it is removed from the chunk
grammar = r"""
NP: {<.*>+}
    }<VBD|IN>+{
"""
cp = nltk.RegexpParser(grammar)
# Chunk the sentence; the result is an NLTK Tree
tree = cp.parse(sentence_tag)
tree.draw()
The standard way to store chunks in text files is IOB tagging: I (inside), O (outside), B (begin).
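As a minimal illustration of the IOB format (the sentence and its tags below are invented for this example), a list of (word, POS tag, IOB tag) triples can be converted to an NLTK Tree and back:

```python
import nltk

# "B-NP" starts a noun-phrase chunk, "I-NP" continues it, "O" lies outside any chunk.
conlltags = [
    ('the', 'DT', 'B-NP'), ('little', 'JJ', 'I-NP'), ('dog', 'NN', 'I-NP'),
    ('saw', 'VBD', 'O'),
    ('the', 'DT', 'B-NP'), ('cat', 'NN', 'I-NP'),
]
# Build a Tree whose NP subtrees correspond to the IOB-marked chunks
tree = nltk.chunk.conlltags2tree(conlltags)
print(tree)
# Converting back recovers the original IOB triples
print(nltk.chunk.tree2conlltags(tree))
```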
import nltk
from nltk.corpus import conll2000
# Load the NP chunks from the training file; the result behaves like a list whose elements are Tree objects
# Each Tree object is one chunked sentence
train_sents = conll2000.chunked_sents("train.txt", chunk_types=["NP"])
# The tree2conlltags function converts a Tree object into a list of IOB-tagged triples
tags = nltk.chunk.tree2conlltags(train_sents[0])
print(tags)
# Chunk sequences of tags beginning with letters characteristic of noun phrase tags (such as CD, DT and JJ)
grammar = r"NP: {<[CDJNP].*>+}"
cp = nltk.RegexpParser(grammar)
# Load the NP chunks from the training file
train_sents = conll2000.chunked_sents("train.txt", chunk_types=["NP"])
print(cp.evaluate(train_sents))
class UnigramChunker(nltk.ChunkParserI):
    """
    Unigram chunker.
    It learns the most likely chunk (IOB) tag for each POS tag from the
    training sentences and then uses that information to chunk new sentences.
    """
    def __init__(self, train_sents):
        """
        Constructor.
        :param train_sents: a list of Tree objects
        """
        train_data = []
        for sent in train_sents:
            # Convert the Tree into a list of IOB triples [(word, tag, IOB-tag), ...]
            conlltags = nltk.chunk.tree2conlltags(sent)
            # Keep only the (POS tag, IOB tag) pairs
            ti_list = [(t, i) for w, t, i in conlltags]
            train_data.append(ti_list)
        # Train a unigram tagger on the (POS tag, IOB tag) data
        self.__tagger = nltk.UnigramTagger(train_data)

    def parse(self, tokens):
        """
        Chunk a sentence.
        :param tokens: a POS-tagged word list
        :return: a Tree object
        """
        # Extract the POS tags
        tags = [tag for (word, tag) in tokens]
        # Assign a chunk (IOB) tag to each POS tag
        ti_list = self.__tagger.tag(tags)
        # Extract the IOB tags
        iob_tags = [iob_tag for (tag, iob_tag) in ti_list]
        # Recombine into CoNLL-style triples
        conlltags = [(word, pos, iob_tag) for ((word, pos), iob_tag) in zip(tokens, iob_tags)]
        return nltk.chunk.conlltags2tree(conlltags)
test_sents = conll2000.chunked_sents("test.txt", chunk_types=["NP"])
train_sents = conll2000.chunked_sents("train.txt", chunk_types=["NP"])
unigram_chunker = UnigramChunker(train_sents)
print(unigram_chunker.evaluate(test_sents))