#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : jieba_gov_report.py
# @Author: evenvi
# @Date  : 19-4-28
# @Desc  : 使用Jieba对2019政府工作报告分词
import codecs
import uniout # 中文显示


# 读取待分词文本数据
def get_content(path):
    """Read a GBK-encoded text file and return it as one string.

    Each line is stripped of surrounding whitespace and the lines are
    concatenated with no separator. Undecodable bytes are silently
    dropped (errors='ignore').

    :param path: path to the text file (assumed GBK-encoded)
    :return: the stripped, concatenated file content as a single string
    """
    with codecs.open(path, 'r', encoding='gbk', errors='ignore') as f:
        # ''.join avoids the quadratic cost of repeated string +=
        return ''.join(line.strip() for line in f)

# 定义高频词函数
# 定义高频词函数
def get_TF(words, topK=10):
    """Return the topK most frequent words.

    :param words: iterable of word tokens
    :param topK: number of (word, count) pairs to return (default 10)
    :return: list of (word, count) tuples, sorted by count descending
    """
    # Counter.most_common does the counting and top-K selection in C-backed
    # stdlib code instead of a manual dict + sorted().
    from collections import Counter
    return Counter(words).most_common(topK)

def main():
    """Load the news corpus, segment one random sample with jieba, and
    print the sample, its segmentation, and its top-10 terms."""
    import glob
    import random
    import jieba

    files = glob.glob("./news/C000013/*.txt")
    corpus = [get_content(x) for x in files]

    # Guard: with no matching files, index selection below would raise.
    if not corpus:
        return

    # Bug fix: random.randint(0, len(corpus)) is inclusive at BOTH ends,
    # so it could return len(corpus) and raise IndexError; randrange
    # excludes the stop value.
    sample_inx = random.randrange(len(corpus))
    split_words = list(jieba.cut(corpus[sample_inx]))
    print('样本之一：' + corpus[sample_inx].encode('utf-8'))
    print('样本分词效果:' + '/'.join(split_words).encode('utf-8'))
    print('样本的topK(10)词语' + str(get_TF(split_words)))

if __name__ == '__main__':
    main()

