#!/usr/bin/env python
# -*- coding:utf-8 -*-

import nltk
from nltk.corpus import brown 
from operator import itemgetter
from pickle import dump,load


# 5.1 使用词性标注器
"""
text = nltk.word_tokenize("And now for something completely different")
print(nltk.pos_tag(text))
# and是CC,并列连词; now和completely是RB,副词;for是IN,介词; something是NN,名词;different是JJ,形容词.

# NLTK提供了每个标记的文档,使用标记可以查询.

# 例如: 包括同形同音异义词
text = nltk.word_tokenize("They refuse to permit us to obtain the refuse permit")
print(nltk.pos_tag(text))
# refuse和permit以一般现在时动词(VBP)和名词(NN)形式出现.

# text.similar()方法为一个词w找出所有上下文wlww2,然后找出所有出现在相同上下文中的词w'.
text = nltk.Text(word.lower() for word in nltk.corpus.brown.words())
print(text.similar('woman'))
print('=='*20)
print(text.similar('bought'))
print('=='*30)
print(text.similar('over'))
print('-='*40)
print(text.similar('the'))
"""




# 5.2 标注语料库
# 表示已标注的标识符
"""
一个已标注的标识符使用一个由标识符和标记组成的元组来表示.
使用函数str2tuple()从表示一个已标注的标识符的标准字符串创建一个特殊元组.
tagged_token = nltk.tag.str2tuple('fly/NN')
print(tagged_token)
print(tagged_token[0])
print(tagged_token[1])
"""

"""
可直接从一个字符串构造一个已标注的标识符的链表.
第一步是对字符串分词以便能访问单独的词/标记字符串,然后将每一个转换成一个元组
(使用str2tuple())
"""
sent = """
The/AT grand/JJ jury/NN commented/VBD on/IN a/AT number/NN of/IN
other/AP topics/NNS ,/, AMONG/IN them/PPO the/AT Atlanta/NP and/CC
Fulton/NP-tl County/NN-tl purchasing/VBG departments/NNS which/WDT 
it/PP said/VBD ``/`` ARE/BER well/QL operated/VBN and/CC follow/VB
generally/R accepted/VBN practices/NNS which/WDT inure/VB to/IN the/AT
best/JJT interest/NN of/IN both/ABX governments/NNS "/" ./.

print([nltk.tag.str2tuple(t) for t in sent.split()])
print('-'*20)
print(nltk.corpus.brown.tagged_words())
print('='*20)
# 只要语料库包含已标注的文本,NLTK的语料库接口都将有一个tagged_words()方法.
print('-='*20)
print(nltk.corpus.nps_chat.tagged_words())
print('+'*20)
print(nltk.corpus.conll2000.tagged_words())
print('-'*20)
print(nltk.corpus.treebank.tagged_words())


print('-=='*20)
print(nltk.corpus.sinica_treebank.tagged_words())
print('==-'*20)
print(nltk.corpus.indian.tagged_words())
print(nltk.corpus.mac_morpho.tagged_words())
print(nltk.corpus.conll2002.tagged_words())
print(nltk.corpus.cess_cat.tagged_words())




# 哪些是布朗语料库的新闻类中最常见的：
brown_news_tagged = brown.tagged_words(categories='news')
tag_fd = nltk.FreqDist(tag for (word,tag) in brown_news_tagged)
# print(tag_fd.keys())
"""



# 名词
# 查看哪些词类出现在一个名词前,频率最高的在最前面.
"""
brown_news_tagged = brown.tagged_words(categories='news')
word_tag_pairs = nltk.bigrams(brown_news_tagged)
print(list(nltk.FreqDist(a[1] for (a,b) in word_tag_pairs if b[1] == 'N')))
# 名词出现在限定词和形容词之后,包括数字形容词(数词)
"""




# 动词
# 按频率排序所有动词:
"""
wsj = nltk.corpus.treebank.tagged_words()
word_tag_fd = nltk.FreqDist(wsj)
print([word + "/" + tag for (word, tag) in word_tag_fd if tag.startswith('V')])
"""
"""
频率分布中计算的项目是词-标记对. 词汇和标记是成对的.
把词汇作为条件,标记作为事件,使用条件-事件对的链表初始化一个条件频率分布.
"""
"""
wsj = nltk.corpus.treebank.tagged_words()
cfd1 = nltk.ConditionalFreqDist(wsj)
print(cfd1['yield'].keys())
print(cfd1['cut'].keys())

# 颠倒配对的顺序,标记作为条件,词汇作为事件.
cfd2 = nltk.ConditionalFreqDist((tag, word) for (word,tag) in wsj)
print(cfd2['VN'].keys())
"""



# 未简化的标记
# 找出所有以NN开始的标记,并为每个标记提供了几个示例词汇.
# 标记有后缀修饰符: -NC表示引用,-HL表示标题中的词. -TL表示标题.
# 找出最频繁的名词标记的程序
"""
def findtags(tag_prefix,tagged_text):
    cfd = nltk.ConditionalFreqDist((tag,word) for (word,tag) in tagged_text
        if tag.startswith(tag_prefix))
    return dict((tag,list(cfd[tag].keys())[:5]) for tag in cfd.conditions())

tagdict = findtags('NN',nltk.corpus.brown.tagged_words(categories='news'))
for tag in sorted(tagdict):
    print(tag,tagdict[tag])
"""





# 探索已标注的语料库
# 找出涉及特定标记和词序列的词.
# 考虑句子中的每个三词窗口,检查是否符合标准,如果标记匹配,输出对应的词.
# 使用POS标记寻找三词短语.
"""
def process(sentence):
    for (w1,t1),(w2,t2),(w3,t3) in nltk.trigrams(sentence):
        if (t1.startswith('V') and t2 == 'TO' and t3.startswith('V')):
            print(w1,w2,w3)

for tagged_sent in brown.tagged_sents():
    process(tagged_sent)
"""


# 标记之间的区别
"""
brown_news_tagged = brown.tagged_words(categories='news')
data = nltk.ConditionalFreqDist((word.lower(),tag)
        for (word,tag) in brown_news_tagged)

for word in data.conditions():
    if len(data[word]) > 3:
        tags = data[word].keys()
        print(word,' '.join(tags))
"""





# 5.3 使用Python字典映射词及其属性
"""
(word,tag)形式的一个已标注词是词和词性标记的关联.
词性标注: 将创建分配一个标记给一个词的程序,标记是在给定上下文中最可能的标记.
这个过程是从词到标记的映射.
在Python中最自然的方式存储映射是使用所谓的字典数据类型
"""
# Python字典
# 字典数据类型: 用来做任意类型之间的映射.
# 定义pos为空字典,添加四个项目,指定一些词的词性.使用熟悉的方括号将条目添加到字典.
"""
pos = {}
print(pos)
pos['colorless'] = 'ADJ' # 词性是形容词.
print(pos)
pos['ideas'] = 'N'
pos['sleep'] = 'V'
pos['furiously'] = 'ADV'
print(pos)
# 使用键来检索值
print(pos['ideas'])
print(pos['colorless'])

# 找键,将字典转换成一个链表,或在需要使用链表的地方使用字典.
print(list(pos))
print(sorted(pos))
print([w for w in pos if w.endswith('s')])


# 使用for循环输出字典的内容
for word in sorted(pos):
    print(word + ":",pos[word])

# 字典的方法keys(),values()和items()可以访问作为单独的链表的键,值以及键-值对.
print(pos.keys())
print(pos.values())
print(pos.items())
print('-='*30)
for key, val in sorted(pos.items()):
    print(key + ":", val)
"""







# 定义字典
# 使用键-值对格式创建字典.
# 两种方法: {} 或者dict()
# 字典的键不可改变.

"""
pos = {'colorless':'ADJ','ideas':'N','sleep':'V','furiously':'ADV'}

# 默认字典
frequency = nltk.defaultdict(int)
frequency['colorless'] = 4
print(frequency['ideas'])
print(frequency['colorless'])

pos = nltk.defaultdict(list)
pos['sleep'] = ['N','V']
print(pos['ideas'])
print(pos['sleep'])
# 这些默认值是将其他对象转换为指定类型的函数.
"""

# 创建一个任一条目的默认值是'N'的字典. 当访问一个不存在的条目时,它会自动添加到字典.
"""
pos = nltk.defaultdict(lambda: 'N')
pos['colorless'] = 'ADJ'
print(pos['blog'])
print(pos.items())
"""

# lambda表达式
# lambda表达式没有指定参数,所以用不带参数的括号调用它.
"""
f = lambda: 'N'
print(f())

def g():
    return 'N'

print(g())
"""




# 创建一个默认字典,映射每个词为它们的替换词,最频繁的n个词将被映射到自己.
"""
alice = nltk.corpus.gutenberg.words('carroll-alice.txt')
vocab = nltk.FreqDist(alice)
v1000 = list(vocab)[:1000]
mapping = nltk.defaultdict(lambda: 'UNK')
for v in v1000:
    mapping[v] = v

alice2 = [mapping[v] for v in alice]
print(alice2[:100])
print(len(set(alice2)))
"""




# 递增地更新字典
# 首先初始化一个空的defaultdict,然后处理文本中每个词性标记.
# 每次遇到一个标记,就使用+=运算符递增它的计数.
# 递增地更新字典,按值排序
"""
counts = nltk.defaultdict(int)
for (word,tag) in brown.tagged_words(categories='news'):
    counts[tag] += 1

print(counts['N'])
print(list(counts))

print(sorted(counts.items(),key=itemgetter(1),reverse=True))
print([t for t,c in sorted(counts.items(), key=itemgetter(1),reverse=True)])
"""
"""
一个重要的按值排序一个字典, 按频率递减顺序显示词汇.
sorted()的第一个参数是要排序的项目.
它是由一个POS标记和一个频率组成的元组的链表,
第二个参数使用函数itemgetter()指定排序键, 
itemgetter(n)返回一个函数. 这个函数可以在一些其他序列对象上被调用获得这个序列的第n个元素的.
"""
"""
pair = ('NP',8336)
print(pair[1])
print(itemgetter(1)(pair))
# sorted()的最后一个参数指定项目是否应被按相反的顺序返回,即频率值递减.
"""
# 初始化一个defaultdict,然后使用for循环来更新其值.
# 按它们最后两个字母索引词汇
"""
last_letters = nltk.defaultdict(list)
words = nltk.corpus.words.words('en')
for word in words:
    key = word[-2:]
    last_letters[key].append(word)

print(last_letters['ly'])
print(last_letters['zy'])
"""
# 使用相同的模式创建一个颠倒顺序的词字典.
"""
words = nltk.corpus.words.words('en')
anagrams = nltk.defaultdict(list)
for word in words:
    key = " ".join(sorted(word))
    anagrams[key].append(word)

print(anagrams['aeilnrt'])


# NLTK以nltk.Index()形式提供一个创建defaultdict(list)
anagrams = nltk.Index((" ".join(sorted(w)),w) for w in words)
print(anagrams['aeilnrt'])
"""






# 颠倒字典
# 字典支持高效查找.
"""
counts = nltk.defaultdict(int)
for word in nltk.corpus.gutenberg.words('milton-paradise.txt'):
    counts[word] += 1

print([key for (key,value) in counts.items() if value == 32])
"""
# 只要得到字典中的所有键-值对,并创建一个新的值-键对字典.
"""
pos = {'colorless':'ADJ','ideas':'N','sleep':'V','furiously':'ADV'}
pos2 = dict((value,key) for (key,value) in pos.items())
print(pos2['N'])

# 使用append()积累词和每个词性.
pos.update({'cats':'N','scratch':'V','peacefully':'ADV','old':'ADJ'})
pos2 = nltk.defaultdict(list)
for key,value in pos.items():
    pos2[value].append(key)

print(pos2['ADV'])

# 查任意词性找到所有具有此词性的词.
pos2 = nltk.Index((value,key) for (key,value) in pos.items())
print(pos2['ADV'])
"""




# 5.4 自动标注
# 一个词的标记依赖于这个词和它在句子中的上下文.
"""
from nltk.corpus import brown 
brown_tagged_sents = brown.tagged_sents(categories='news')
brown_sents = brown.sents(categories='news')
"""



# 默认标注器
# 标注器是为每个标识符分配同样的标记.
"""
tags = [tag for (word,tag) in brown.tagged_words(categories='news')]
print(nltk.FreqDist(tags).max())

# 创建一个将所有词都标注成NN的标注器
brown_tagged_sents = brown.tagged_sents(categories='news')

raw = 'I do not like green eggs and ham, I do not like them Sam I am!'
tokens = nltk.word_tokenize(raw)
default_tagger = nltk.DefaultTagger('NN')
print(default_tagger.tag(tokens))
print(default_tagger.evaluate(brown_tagged_sents))
# 默认的标注器给每一个单独的词分配标记.
"""





# 正则表达式标注器
# 正则表达式标注器基于匹配模式分配标记给标识符.
"""
patterns = [
    (r'.*ing$','VBG'),
    (r'.*ed$','VBD'),
    (r'.*es$','VBZ'),
    (r'.*ould$','MD'),
    (r'.*\'s$','NN$'),
    (r'.*s$','NNS'),
    (r'^-?[0-9]+(.[0-9]+)?$','CD'),
    (r'.*','NN')
]
# 第一个匹配上的会被使用
# 建立一个标注器,并用它来标记一个句子.
brown_sents = brown.sents(categories='news')
brown_tagged_sents = brown.tagged_sents(categories='news')
regexp_tagger = nltk.RegexpTagger(patterns)
print(regexp_tagger.tag(brown_sents[3]))
print(regexp_tagger.evaluate(brown_tagged_sents))
"""





# 查询标注器
# 找出100个最频繁的词,存储它们最有可能的标记.
"""
brown_tagged_sents = brown.tagged_sents(categories='news')
fd = nltk.FreqDist(brown.words(categories='news'))
cfd = nltk.ConditionalFreqDist(brown.tagged_words(categories='news'))
most_freq_words = list(fd.keys())[:100]
likely_tags = dict((word,cfd[word].max()) for word in most_freq_words)
baseline_tagger = nltk.UnigramTagger(model=likely_tags)
print(baseline_tagger.evaluate(brown_tagged_sents))

# 查看在一些未标注的输入文本上做的怎么样:
sent = brown.sents(categories='news')[3]
print(baseline_tagger.tag(sent))
"""


# 分配默认标记NN, 要先使用查找表,如果它不能指定一个标记就使用默认标注器,这个过程叫回退.
# 通过指定一个标注器作为另一个标注器的参数.
# 现在查找标注器将只存储名词以外的词的词-标记对,只要它不能给一个词分配标记,它将会调用默认标注器.
# baseline_tagger = nltk.UnigramTagger(model=likely_tags,
        # backoff=nltk.DefaultTagger('NN'))

# 写一个程序来创建和评估具有一定范围的查找标注器
# 查找标注器的性能,使用不同大小的模型.
"""
def performance(cfd,wordlist):
    it = dict((word,cfd[word].max()) for word in wordlist)
    baseline_tagger = nltk.UnigramTagger(model=it,backoff=nltk.DefaultTagger('NN'))
    return baseline_tagger.evaluate(brown.tagged_sents(categories='news'))

def display():
    import pylab 
    words_by_freq = list(nltk.FreqDist(brown.words(categories='news')))
    cfd = nltk.ConditionalFreqDist(brown.tagged_words(categories='news'))
    sizes = 2 ** pylab.arange(15)
    perfs = [performance(cfd,words_by_freq[:size]) for size in sizes]
    pylab.plot(sizes,perfs,'-bo')
    pylab.title('Lookup Tagger Performance with Varying Model Size')
    pylab.xlabel('Model Size')
    pylab.ylabel('Performance')
    pylab.show()

print(display())
"""




# 5.5 N-gram标注
"""
一元标注
    一元标注器基于一个简单的统计算法: 对每个标识符分配这个独特的标识符最有可能的标记
    一个一元标注器的行为就像一个查找标注器,有一个更方便建立它的技术,称为训练.
"""
"""
brown_tagged_sents = brown.tagged_sents(categories='news')
brown_sents = brown.sents(categories='news')
unigram_tagger = nltk.UnigramTagger(brown_tagged_sents)
print(unigram_tagger.tag(brown_sents[2007]))
print(unigram_tagger.evaluate(brown_tagged_sents))
"""
"""
训练一个UnigramTagger,通过在初始化标注器时指定已标注的句子数据作为参数.
训练过程中涉及检查每个词的标记,将所有词的最可能的标记存储在一个字典里面,
这个字典存储在标注器内部.
"""

# 分离训练和测试数据
"""
size = int(len(brown_tagged_sents) * 0.9)
print(size)
train_sents = brown_tagged_sents[:size]
test_sents = brown_tagged_sents[size:]
unigram_tagger = nltk.UnigramTagger(train_sents)
print(unigram_tagger.evaluate(test_sents))
"""


# 一般的N-gram的标注
# 一个n-gram标注器是一个unigram标注器的一般化,它的上下文是当前词和它前面n-1个标识符的词性标记.
# 1-gram标注器是一元标注器的另一个名称;即用于标注一个标识符的上下文的只是标识符本身.
# NgramTagger类使用一个已标注的训练语料库来确定对每个上下文哪个词性标记最有可能.
# Train a bigram tagger on 90% of the Brown news sentences, inspect its
# output on a training sentence and on an unseen sentence, then score it
# on the held-out 10%.
brown_sents = brown.sents(categories='news')
brown_tagged_sents = brown.tagged_sents(categories='news')
size = int(0.9 * len(brown_tagged_sents))
train_sents = brown_tagged_sents[:size]
test_sents = brown_tagged_sents[size:]

bigram_tagger = nltk.BigramTagger(train_sents)
# Sentence 2007 comes from the training portion.
print(bigram_tagger.tag(brown_sents[2007]))

# Sentence 4203 was not part of training; contexts the tagger never saw
# get tagged None (the sparse-data problem).
unseen_sent = brown_sents[4203]
print(bigram_tagger.tag(unseen_sent))
# Accuracy on the held-out test set.
print(bigram_tagger.evaluate(test_sents))



# 组合标注器
"""
解决精度和覆盖范围之间的权衡的一个办法是尽可能的使用更精确的算法.
1. 尝试使用bigram标注器标注标识符.
2. 如果bigram标注器无法找到一个标记,尝试unigram标注器.
3. 如果unigram标注器也无法找到一个标记,使用默认标注器

t0 = nltk.DefaultTagger('NN')
t1 = nltk.UnigramTagger(train_sents,backoff=t0)
t2 = nltk.BigramTagger(train_sents,backoff=t1)
print(t2.evaluate(test_sents))
"""




# 性能限制
"""
cfd = nltk.ConditionalFreqDist(
    ((x[1],y[1],z[0]),z[1])
    for sent in brown_tagged_sents
    for x,y,z in nltk.trigrams(sent))
ambiguous_contexts = [c for c in cfd.conditions() if len(cfd[c]) > 1]
print(sum(cfd[c].N() for c in ambiguous_contexts) / cfd.N())
"""
# 调查标注器性能的方法，混淆矩阵.
# 用图表表示期望的标记与实际由标注器产生的标记.
"""
test_tags = [tag for sent in brown.sents(categories='editorial')
        for (word,tag) in t2.tag(sent)]
gold_tags = [tag for (word,tag) in brown.tagged_words(categories='editorial')]
print(nltk.ConfusionMatrix(gold_tags,test_tags))
"""




# 跨句子边界标注
# 一个n-gram标注器使用最近的标记作为为当前的词选择标记的指导.
# 当标记一个句子的第一个词时,trigram标注器将使用前面两个标识符的词性标记.
# 使用已标注句子的链表来训练,运行和评估标注器.
# 句子层面的n-gram标注
"""
brown_tagged_sents = brown.tagged_sents(categories='news')
brown_sents = brown.sents(categories='news')
size = int(len(brown_tagged_sents) * 0.9)
train_sents = brown_tagged_sents[:size]
test_sents = brown_tagged_sents[size:]
t0 = nltk.DefaultTagger('NN')
t1 = nltk.UnigramTagger(train_sents,backoff=t0)
t2 = nltk.BigramTagger(train_sents,backoff=t1)
print(t2.evaluate(test_sents))
"""






# 5.7 如何确定一个词的分类
# 在一般情况下,使用形态学,句法和语义线索确定一个词的类别.
