#!/usr/bin/env python
# -*- coding:utf-8 -*-

from nltk.corpus import names
import random
import nltk
from nltk.corpus import movie_reviews
from nltk.corpus import brown 

# 第6章 学习分类文本
# 6.1 有监督分类

# 特征提取器函数建立一个字典,包含有关给定名称的相关信息
"""
def gender_features(word):
    return {'last_letter': word[-1]}

print(gender_features('Shrek'))

# 定义了一个特征提取器,准备一个例子和对应类标签的链表
names = ([(name,'male') for name in names.words('male.txt')] +
        [(name, 'female') for name in names.words('female.txt')])
print(random.shuffle(names))
# 接下来使用特征提取器处理名称数据,并划分特征集的结果链表为一个训练集和一个测试集.
# 训练集用于训练一个新的"朴素贝叶斯"分类器.
featuresets = [(gender_features(n),g) for (n,g) in names]
train_set,test_set = featuresets[500:],featuresets[:500]
classifier = nltk.NaiveBayesClassifier.train(train_set)
# 测试一些没有出现在训练数据中的名字:
print(classifier.classify(gender_features('Neo')))
print(classifier.classify(gender_features('Trinity')))
print(nltk.classify.accuracy(classifier,test_set))
print(classifier.show_most_informative_features(5))
"""
# 此列表显示训练集中以a结尾的名字中女性是男性的38倍,而以k结尾名字中男性是女性的31倍.
# 这些比率称为似然比,可以用于比较不同特征--结果关系.
"""




# 选择正确的特征
# 一个特征提取器,过拟合性别特征,这个特征提取器返回的特征集包括大量指定的特征,
# 导致对于相对较小的名字语料库过拟合.
def gender_features2(name):
    features = {}
    features["firstletter"] = name[0].lower()
    features["lastletter"] = name[-1].lower()
    for letter in 'abcdefghijklmnopqrstuvwxyz':
        features["count(%s)" % letter] = name.lower().count(letter)
        features["has(%s)" % letter] = (letter in name.lower())
    return features

print(gender_features2('John'))


featuresets = [(gender_features2(n),g) for (n,g) in names]
train_set,test_set = featuresets[500:],featuresets[:500]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier,test_set))
# 一旦初始特征集被选定,完善特征集的一个方法是错误分析. 
# 选择一个开发集,包含用于创建模型的语料数据. 然后将这种开发集分为训练集和开发测试集.
train_names = names[1500:]
devtest_names = names[500:1500]
test_names = names[:500]
# 训练集用于训练模型,开发测试集用于进行错误分析,测试集用于系统的最终评估.

# 使用训练集训练一个模型,然后再开发测试集上运行.
train_set = [(gender_features(n),g) for (n,g) in train_names]
devtest_set = [(gender_features(n),g) for (n,g) in devtest_names]
test_set = [(gender_features(n),g) for (n,g) in test_names]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier,devtest_set))

# 使用开发测试集,生成一个分类器预测名字性别时的错误列表
errors = []
for (name,tag) in devtest_names:
    guess = classifier.classify(gender_features(name))
    if guess != tag:
        errors.append((tag,guess,name))

for (tag,guess,name) in sorted(errors):
    print('correct={} guess={} name={}'.format(tag,guess,name))    
"""







# 调整特征提取器包括两个字母后缀的特征
"""
def gender_features(word):
    return {'suffix1': word[-1:],
        'suffix2': word[-2:]}
# 使用新的特征提取器重建分类器
names = ([(name,'male') for name in names.words('male.txt')] +
        [(name, 'female') for name in names.words('female.txt')])
print(random.shuffle(names))
train_names = names[1500:]
devtest_names = names[500:1500]
train_set = [(gender_features(n),g) for (n,g) in train_names]
devtest_set = [(gender_features(n),g) for (n,g) in devtest_names]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier,devtest_set))
"""





# 文档分类
# 使用语料库,可建立分类器,自动给新文档添加适当的类别标签.
# 选择电影评论语料库,将每个评论归类为正面或负面.
"""
documents = [(list(movie_reviews.words(fileid)),category)
            for category in movie_reviews.categories()
            for fileid in movie_reviews.fileids(category)]
print(random.shuffle(documents))


# 一个文档分类的特征提取器,其特征表示每个词是否在一个给定的文档中.
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words.keys())[:2000]
def document_features(document):
    document_words = set(document)
    features = {}
    for word in word_features:
        features['contains({})'.format(word)] = (word in document_words)
    return features

print(document_features(movie_reviews.words('pos/cv957_8737.txt')))


# 训练和测试一个分类器进行文档分类
featuresets = [(document_features(d),c) for (d,c) in documents]
train_set,test_set = featuresets[100:],featuresets[:100]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier,test_set))
print(classifier.show_most_informative_features(5))
"""





# 探索上下文语境
# 使用依赖上下文的特征提取器定义一个词性标记分类器
# 一个词性分类器,它的特征检测器检查一个词出现的上下文以便决定应该分配的词性标记,前面的词被作为一个特征.
"""
def pos_features(sentence,i):
    features = {"suffix(1)": sentence[i][-1:],
                "suffix(2)": sentence[i][-2:],
                "suffix(3)": sentence[i][-3:]}

    if i == 0:
        features["prev-word"] = "<START>"
    else:
        features["prev-word"] = sentence[i-1]
    return features

print(pos_features(brown.sents()[0],8))

tagged_sents = brown.tagged_sents(categories='news')
featuresets = []
for tagged_sent in tagged_sents:
    untagged_sent = nltk.tag.untag(tagged_sent)
    for i,(word,tag) in enumerate(tagged_sent):
        featuresets.append(
            (pos_features(untagged_sent,i),tag)
        )

size = int(len(featuresets) * 0.1)
train_set,test_set = featuresets[size:],featuresets[:size]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier,test_set))
"""







# 序列分类
# 一种序列分类器策略,称为连续分类或贪婪序列分类,是为第一个输入找到最有可能的类标签,然后使用这个问题的答案帮助找到下一个输入的最佳的标签.
# 例: 使用连续分类器进行词性标注.
"""
def pos_features(sentence,i,history):
    features = {"suffix(1)": sentence[i][-1:],
                "suffix(2)": sentence[i][-2:],
                "suffix(3)": sentence[i][-3:]}

    if i == 0:
        features["prev-word"] = "<START>"
        features["prev-tag"] = "<START>"
    else:
        features["prev-word"] = sentence[i-1]
        features["prev-tag"] = history[i-1]
    return features

class ConsecutivePosTagger(nltk.TaggerI):
    def __init__(self,train_sents):
        train_set = []
        for tagged_sent in train_sents:
            untagged_sent = nltk.tag.untag(tagged_sent)
            history = []
            for i, (word,tag) in enumerate(tagged_sent):
                featureset = pos_features(untagged_sent,i,history)
                train_set.append((featureset,tag))
                history.append(tag)

        self.classifier = nltk.NaiveBayesClassifier.train(train_set)

    def tag(self,sentence):
        history = []
        for i,word in enumerate(sentence):
            featureset = pos_features(sentence,i,history)
            tag = self.classifier.classify(featureset)
            history.append(tag)
        return zip(sentence,history)


tagged_sents = brown.tagged_sents(categories='news')   
size = int(len(tagged_sents) * 0.1)
train_sents,test_sents = tagged_sents[size:],tagged_sents[:size]
tagger = ConsecutivePosTagger(train_sents)
print(tagger.evaluate(test_sents))
"""






# 6.2 有监督分类的更多例子
# 句子分割
# 句子分割可以看作是一个标点符号的分类任务;
# 第一步是获得一些已被分割成句子的数据,将它转换成一种适合提取特征的形式.
"""
sents = nltk.corpus.treebank_raw.sents()
# tokens是单独句子标识符的合并链表.
tokens = []
# boundaries是一个包含所有句子边界标识符索引的集合.
boundaries = set()
offset = 0
for sent in nltk.corpus.treebank_raw.sents():
    tokens.extend(sent)
    offset += len(sent)
    boundaries.add(offset-1)

# 下一步,需要指定用于决定标点是否表示句子边界的数据特征.
def punct_features(tokens,i):
    return {'next-word-capitalized': tokens[i+1][0].isupper(),
            'prevword': tokens[i-1].lower(),
            'punct':tokens[i],
            'prev-word-is-one-char': len(tokens[i-1]) == 1}
# 基于这一特征提取器,可通过选择所有的标点符号创建一个加标签的特征集的链表,
# 然后标注它们是否是边界标识符:
featuresets = [(punct_features(tokens,i),(i in boundaries))
                for i in range(1,len(tokens)-1)
                if tokens[i] in '.?!']
# 使用这些特征集,训练和评估一个标点符号分类器:
size = int(len(featuresets) * 0.1)
train_set,test_set = featuresets[size:],featuresets[:size]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier,test_set))
"""




# 使用这种分类器进行断句,检查每个标点符号,看它是否是作为一个边界标识符,在边界标识符处分割词链表.
# 基于分类的断句器:
"""
def segment_sentences(words):
    start = 0
    sents = []
    for i, word in words:
        if word in '.?!' and classifier.classify(words,i) == True:
            sents.append(words[start:i+1])
            start = i+1
    
    if start < len(words):
        sents.append(words[start:])
"""




# 识别对话行为类型
# 识别对话中言语下的对话行为.
# 利用数据建立一个分类器,识别新的即时消息帖子的对话行为类型.
# 第一步是提取基本的消息数据. 将调用xml_posts()来得到一个数据结构,表示每个帖子的XML注释:
"""
posts = nltk.corpus.nps_chat.xml_posts()[:10000]
# 将定义一个简单的特征提取器,检查帖子包含什么词:
def dialogue_act_features(post):
    features = {}
    for word in nltk.word_tokenize(post):
        features['contains(%s)' % word.lower()] = True 
    return features

# 通过为每个帖子提取特征(使用post.get('class')获得一个帖子的对话行为类型)构造训练和测试数据,并创建一个新的分类器:
featuresets = [(dialogue_act_features(post.text),post.get('class'))
                for post in posts]
size = int(len(featuresets) * 0.1)
train_set,test_set = featuresets[size:],featuresets[:size]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier,test_set))
"""






# 识别文字蕴含
"""
# "认识文字蕴含"的特征提取器,RTEFeatureExtractor类建立了一个除去一些停用词后再文本和假设中都有的词汇包,然后计算重叠和差异.
def rte_features(rtepair):
    extractor = nltk.RTEFeatureExtractor(rtepair)
    features = {}
    features['word_overlap'] = len(extractor.overlap('word'))
    features['word_hyp_extra'] = len(extractor.hyp_extra('word'))
    features['ne_overlap'] = len(extractor.overlap('ne'))
    features['ne_hyp_extra'] = len(extractor.hyp_extra('ne'))
    return features

# 检查前面显示的文本/假设对34的一些属性.
rtepair = nltk.corpus.rte.pairs(['rte3_dev.xml'])[33]
extractor = nltk.RTEFeatureExtractor(rtepair)
print(extractor.text_words)
print(extractor.hyp_words)
print(extractor.overlap('word'))
print(extractor.overlap('ne'))
print(extractor.hyp_extra('word'))
"""




# 6.3 评估
# 6.3.1 测试集
# 可通过从一个反映单一的文体的数据源随机分配句子,创建训练集和测试集.
"""
tagged_sents = list(brown.tagged_sents(categories='news'))
random.shuffle(tagged_sents)
size = int(len(tagged_sents) * 0.1)
train_set,test_set = tagged_sents[size:],tagged_sents[:size]
print(train_set)
print(test_set)
# 一个好的做法是:确保训练集和测试集来自不同的文件.
file_ids = brown.fileids(categories='news')
size = int(len(file_ids) * 0.1)
train_set = brown.tagged_sents(file_ids[size:])
test_set = brown.tagged_sents(file_ids[:size])
print(train_set)
print(test_set)
# 从与训练集中文档联系更少的文档中获取测试集.
train_set = brown.tagged_sents(categories='news')
test_set = brown.tagged_sents(categories='fiction')
print(train_set)
print(test_set)
"""



# 6.3.2 准确度
# 一个名字性别分类器,在包含80个名字的测试集上预测正确的名字有60个,它有60/80=75%的准确度.
# nltk.classify.accuracy()函数
"""
train_set = brown.tagged_sents(categories='news')
test_set = brown.tagged_sents(categories='fiction')
classifier = nltk.NaiveBayesClassifier.train(train_set)
print('Accuracy: %4.2f' % nltk.classify.accuracy(classifier,test_set))
"""







# 6.3.4 混淆矩阵
# unigram标注器生成一个混淆矩阵
"""
def tag_list(tagged_sents):
    return [tag for sent in tagged_sents for (word,tag) in sent]

def apply_tagger(tagger,corpus):
    return [tagger.tag(nltk.tag.untag(sent)) for sent in corpus]

gold = tag_list(brown.tagged_sents(categories='editorial'))
test = tag_list(apply_tagger(t2, brown.tagged_sents(categories='editorial')))
cm = nltk.ConfusionMatrix(gold,test)
# NameError: name 't2' is not defined
"""





# 6.4 决策树
# 6.4.1 熵和信息增益
# 计算标签链表的熵
import math
def entropy(labels):
    freqdist = nltk.FreqDist(labels)
    probs = [freqdist.freq(l) for l in nltk.FreqDist(labels)]
    return sum([p * math.log(p,2) for p in probs])

# Demo: print the entropy of several gender-label lists with varying class
# balance (all-male, 3:1, 2:2, 1:3, all-female).
for sample in (['male', 'male', 'male', 'male'],
               ['male', 'female', 'male', 'male'],
               ['female', 'male', 'female', 'male'],
               ['female', 'female', 'male', 'female'],
               ['female', 'female', 'female', 'female']):
    print(entropy(sample))

