# Count word frequencies with NLTK and plot them.
import nltk

tokens = ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please',
          'maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid',
          'my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him']

# Build a frequency distribution (word -> occurrence count) over the tokens.
freq = nltk.FreqDist(tokens)

# Print every word together with its frequency, one "word:count" pair per line.
for word, count in freq.items():
    print(f'{word}:{count}')

# Take the 5 most common words as a list of (word, count) tuples.
standard_freq = freq.most_common(5)
print(standard_freq)

# Draw a frequency plot of the top 20 words (requires matplotlib).
freq.plot(20, cumulative=False)


# nltk库分句
# from nltk.tokenize import sent_tokenize
# mytext = "Hello Adam, how are you? I hope everything is going well. Today is a good day, see you dude."
# print(sent_tokenize(mytext))

# import nltk
# text = "Hello Adam, how are you? I hope everything is going well. Today is a good day, see you dude."
# sents = nltk.sent_tokenize(text)
# print(sents)
# # words = [nltk.word_tokenize(sentence) for sentence in sents ]
# # print(words)
# words = nltk.word_tokenize(text)
# print(words)
#nltk库分词例子
# import nltk
# from nltk.tokenize import word_tokenize
# mytext = "Hello Mr. Adam, how are you? I hope everything is going well. Today is a good day, see you dude."
# print(word_tokenize(mytext))

#词性标注例子
# import nltk
# text=nltk.word_tokenize('what does the fox say')
# print(text)
# print(nltk.pos_tag(text))

#分词例子
# from pyhanlp import *
# sentence = "下雨天地面积水"
#
# # 返回一个list，每个list是一个分词后的Term对象，可以获取word属性和nature属性，分别对应的是词和词性
# terms = HanLP.segment(sentence )
# for term in terms:
# 	print(term.word,term.nature)

#关键词提取与自动摘要(使用了TextRank与TextRankSentence算法实现)
# from pyhanlp import *
#
# document = "水利部水资源司司长陈明忠9月29日在国务院新闻办举行的新闻发布会上透露，" \
#            "根据刚刚完成了水资源管理制度的考核，有部分省接近了红线的指标，" \
#            "有部分省超过红线的指标。对一些超过红线的地方，陈明忠表示，对一些取用水项目进行区域的限批，" \
#            "严格地进行水资源论证和取水许可的批准。"
#
# # 提取document的两个关键词
# print(HanLP.extractKeyword(document, 2))
#
# # 提取ducument中的3个关键句作为摘要
# print(HanLP.extractSummary(document, 3))

#文本分类
# import os
#
# from pyhanlp import SafeJClass
# from tests.test_utility import ensure_data
#
# NaiveBayesClassifier = SafeJClass('com.hankcs.hanlp.classification.classifiers.NaiveBayesClassifier')
# IOUtil = SafeJClass('com.hankcs.hanlp.corpus.io.IOUtil')
# sogou_corpus_path = ensure_data('搜狗文本分类语料库迷你版',
#                                 'http://file.hankcs.com/corpus/sogou-text-classification-corpus-mini.zip')
#
#
# def train_or_load_classifier():
#     model_path = sogou_corpus_path + '.ser'
#     if os.path.isfile(model_path):
#         return NaiveBayesClassifier(IOUtil.readObjectFrom(model_path))
#     classifier = NaiveBayesClassifier()
#     classifier.train(sogou_corpus_path)
#     model = classifier.getModel()
#     IOUtil.saveObjectTo(model, model_path)
#     return NaiveBayesClassifier(model)
#
#
# def predict(classifier, text):
#     print("《%16s》\t属于分类\t【%s】" % (text, classifier.classify(text)))
#     # 如需获取离散型随机变量的分布，请使用predict接口
#     # print("《%16s》\t属于分类\t【%s】" % (text, classifier.predict(text)))
#
#
# if __name__ == '__main__':
#     classifier = train_or_load_classifier()
#     predict(classifier, "C罗获2018环球足球奖最佳球员 德尚荣膺最佳教练")
#     predict(classifier, "英国造航母耗时8年仍未服役 被中国速度远远甩在身后")
#     predict(classifier, "研究生考录模式亟待进一步专业化")
#     predict(classifier, "如果真想用食物解压,建议可以食用燕麦")
#     predict(classifier, "通用及其部分竞争对手目前正在考虑解决库存问题")

#情感分析
# from pyhanlp import *
# from tests.test_utility import ensure_data
#
# IClassifier = JClass('com.hankcs.hanlp.classification.classifiers.IClassifier')
# NaiveBayesClassifier = JClass('com.hankcs.hanlp.classification.classifiers.NaiveBayesClassifier')
# # 中文情感挖掘语料-ChnSentiCorp 谭松波
# chn_senti_corp = ensure_data("ChnSentiCorp情感分析酒店评论", "http://file.hankcs.com/corpus/ChnSentiCorp.zip")
#
#
# def predict(classifier, text):
#     print("《%s》 情感极性是 【%s】" % (text, classifier.classify(text)))
#
#
# if __name__ == '__main__':
#     classifier = NaiveBayesClassifier()
#     #  创建分类器，更高级的功能请参考IClassifier的接口定义
#     classifier.train(chn_senti_corp)
#     #  训练后的模型支持持久化，下次就不必训练了
#     predict(classifier, "前台客房服务态度非常好！早餐很丰富，房价很干净。再接再厉！")
#     predict(classifier, "结果大失所望，灯光昏暗，空间极其狭小，床垫质量恶劣，房间还伴着一股霉味。")
#     predict(classifier, "可利用文本分类实现情感分析，效果不是不行")

#依存句法分析
# from pyhanlp import *
# print(HanLP.parseDependency("徐先生还具体帮助他确定了把画雄鹰、松鼠和麻雀作为主攻目标。"))

#命名实体
# from pyhanlp import *
#
# CRFnewSegment = HanLP.newSegment("crf")
# term_list = CRFnewSegment.seg("译智社的田丰要说的是这只是一个hanlp命名实体识别的例子")
# print(term_list)




#类似的工具包还有许多，比如国内常用的fastnlp、哈工大的LTP、清华的THULAC以及中科院的NLPIR