# -*- coding: utf-8 -*-
'''
Chinese extension of the gender-prediction example from Chapter 6 of
"Natural Language Processing with Python": predicting gender from Chinese names.

'''
__author__ = 'Jinkey'


# from nltk.corpus import names
from nltk import NaiveBayesClassifier, classify, DecisionTreeClassifier,FreqDist
import random
import pandas as pd


# ====================Predict gender from a name=================================

# def gender_features(word):
#     if len(word) == 1:
#         two = word[-1]
#     return {'one': word[-1]}
#
# data = pd.read_csv('data/name.csv')
# names = []
# for i in range(0, data["姓名"].count()):
#     name = str(data.values[:, 0][i]).decode(encoding="utf-8", errors="strict")
#     gender = str(data.values[:, 1][i]).decode(encoding="utf-8", errors="strict")
#     row = (name, gender)
#     names.append(row)
#
# random.shuffle(names) # 打乱顺序
#
# featuresets = [(gender_features(name), gender) for (name, gender) in names]
#
# train_set, test_set = featuresets[175894:], featuresets[: 175894]
# classifier = NaiveBayesClassifier.train(train_set)
# # for (name, gender) in names[: 175894]:
# #     guess = classifier.classify(gender_features(name))
# #     if guess == u"女":
# #         print name, guess, gender
#
# print u"预测性别为: " + classifier.classify(gender_features(u"陈煜邦"))
# # print classifier.show_most_informative_features()
# print "模型准确率: " + str(classify.accuracy(classifier, test_set))


# ====================Document classification=================================
# from nltk.corpus import movie_reviews
# from nltk import FreqDist
# documents = [(list(movie_reviews.words(fileid)), category)
#              for category in movie_reviews.categories()
#              for fileid in movie_reviews.fileids(category)]
# random.shuffle(documents)
#
# all_words = FreqDist(w.lower() for w in movie_reviews.words())
# word_features = all_words.keys()[:2000]
#
#
# def document_features(document):
#     document_words = set(document)
#     features = dict([])
#     for word in word_features:
#         features['%s' % word] = (word in document_words)
#     return features
#
# # print document_features(movie_reviews.words('pos/cv957_8737.txt'))
#
# featuresets = [(document_features(d), c) for (d,c) in documents]
# train_set, test_set = featuresets[100:], featuresets[:100]
# classifier = NaiveBayesClassifier.train(train_set)
# print classify.accuracy(classifier, test_set)

from nltk.corpus import brown

# Count how often each 1-, 2-, and 3-character word suffix occurs in the
# Brown corpus; frequent suffixes make useful POS-tagging features.
suffix_fdist = FreqDist()
for word in brown.words():
    word = word.lower()
    suffix_fdist[word[-1:]] += 1
    suffix_fdist[word[-2:]] += 1
    suffix_fdist[word[-3:]] += 1

# FIX: in NLTK 3 / Python 3, FreqDist.keys() returns an unordered,
# unsliceable dict view, so keys()[:100] raises TypeError and would not be
# frequency-ranked anyway. most_common(100) yields (suffix, count) pairs
# sorted by descending frequency.
common_suffixes = [suffix for suffix, _count in suffix_fdist.most_common(100)]
print(common_suffixes)

# ====================Entropy calculation=================================
import math
def entropy(labels):
    """Return the Shannon entropy (in bits) of the label distribution.

    Args:
        labels: iterable of hashable labels (e.g. gender strings).

    Returns:
        float: entropy in bits; 0.0 for an empty label sequence and for a
        sequence containing a single distinct label.
    """
    # Counter replaces the redundant double construction of nltk's
    # FreqDist and removes the nltk dependency from this pure function.
    from collections import Counter

    counts = Counter(labels)
    total = sum(counts.values())
    if total == 0:
        # Guard: the original returned int 0 here via -sum([]); return a
        # float for a consistent return type.
        return 0.0
    # float() keeps the division correct under Python 2 as well.
    probs = [count / float(total) for count in counts.values()]
    return -sum(p * math.log(p, 2) for p in probs)

# FIX: Python 2 `print` statements fail to parse under Python 3; the
# parenthesized single-argument form behaves identically on both versions.
# Demo: entropy is 0 for a homogeneous label set and peaks at 1 bit for a
# 50/50 split.
print(entropy(['male', 'male', 'male', 'male']))
print(entropy(['male', 'female', 'male', 'male']))
print(entropy(['female', 'male', 'female', 'male']))
print(entropy(['female', 'female', 'male', 'female']))
print(entropy(['female', 'female', 'female', 'female']))

