#!/usr/bin/env python

# -*- coding: utf-8 -*-

import math
import os
import jieba
def get_cut_word(content):
    """Segment *content* with jieba and return a list of filtered tokens.

    Stopwords, tabs, newlines, and plain spaces are removed from the
    segmentation result.

    Args:
        content: Raw text to segment.

    Returns:
        list[str]: The filtered tokens, in original order.
    """
    stopwords = {'机组'}  # words to drop from the result
    # jieba precise-mode segmentation of the stripped text
    content_seg = jieba.cut(content.strip(), cut_all=False)
    # BUG FIX: the original removed spaces by calling outstr.remove(' ')
    # while iterating outstr (and tested `' ' in outstr` instead of the
    # current word), which mutates the list under iteration and can skip
    # elements. Filter everything in one pass instead.
    outstr = [
        word for word in content_seg
        if word not in stopwords and word not in ('\t', '\n', ' ')
    ]
    print('分词结束。')
    return outstr

def get_all_dict(outstr, all_dict):
    """Update document-frequency counts in *all_dict* from one document.

    Each distinct word in *outstr* (one document's token list) increments
    its count in *all_dict* by exactly one, regardless of how many times
    the word occurs in the document — i.e. *all_dict* maps a word to the
    number of documents containing it.

    Args:
        outstr: Token list for a single document.
        all_dict: Mapping word -> document count; mutated in place.

    Returns:
        dict: The updated *all_dict* (same object that was passed in).
    """
    # BUG FIX: the original incremented all_dict inside the per-token loop,
    # iterating the growing temp_dict each time, so a word's count rose
    # once per token processed so far instead of once per document.
    # Deduplicate first, then bump each word exactly once.
    for word in set(outstr):
        all_dict[word] = all_dict.get(word, 0) + 1
    print('word字典构造结束')
    return all_dict

def get_idf_dict(all_dict, total):
    """Build an IDF dictionary from document-frequency counts.

    IDF is computed as log10(total / (df + 1)); the +1 smooths against
    division by zero. Only keys consisting entirely of CJK Unified
    Ideographs are kept, matching the original intent that every key
    be a Chinese word.

    Args:
        all_dict: Mapping word -> number of documents containing it.
        total: Total number of documents in the corpus.

    Returns:
        dict: word -> IDF value formatted as a 10-decimal string.
    """
    idf_dict = {}
    for key in all_dict:
        # BUG FIX: the original compared the whole word lexicographically
        # with `key > u'\u4e00'`, which excluded the character U+4E00 ("一")
        # itself and only effectively checked the first character. Test
        # every character against the CJK Unified Ideographs range instead.
        # (The original's encode/decode round trip was a no-op and is gone.)
        if key and all(u'\u4e00' <= ch <= u'\u9fa5' for ch in key):
            idf_dict[key] = '%.10f' % math.log10(total / (all_dict[key] + 1))
    print('IDF字典构造结束')
    return idf_dict



def readfile(path):
    """Read a UTF-8 text file and return its content as one string.

    Newlines, carriage returns, ideographic spaces (U+3000), and ordinary
    spaces are all stripped out, so downstream segmentation sees one
    continuous run of text.

    Args:
        path: Path of the file to read.

    Returns:
        str: The concatenated, whitespace-free file content.
    """
    lines = []
    # `with` guarantees the handle is closed even if reading raises,
    # unlike the original explicit open()/close() pair.
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            # U+3000 is the full-width (ideographic) space; normalize it
            # to an ASCII space so the replace below removes it too.
            lines.append(line.strip('\n').replace(u'\u3000', u' '))

    filetxt = "".join(lines)
    filetxt = filetxt.replace("\r\n", "")  # drop stray carriage returns
    filetxt = filetxt.replace(" ", "")     # drop remaining spaces
    print(f'读取文件{path}完毕。')
    return filetxt


def savefile(path, content):
    """Append *content* to the file at *path*, encoded as UTF-8.

    Args:
        path: Destination file path; created if it does not exist.
        content: Text to append.
    """
    # BUG FIX: the original opened the file with the platform default
    # encoding, which corrupts Chinese text on e.g. Windows (GBK default);
    # every other open() in this script uses UTF-8 explicitly.
    with open(path, 'a', encoding='utf-8') as f:
        f.write(content)
    print(f'存储文件{path}完毕。')

# --- Script body: build an IDF dictionary from a single document -----------
all_dict = dict()  # word -> number of documents containing it
total = 0  # document counter; NOTE(review): never incremented or used below
content = readfile(('./text/nnn.txt'))  # read the raw corpus file
outstr = get_cut_word(content)  # segment the file content into tokens
all_dict = get_all_dict(outstr, all_dict)  # count document frequencies
# NOTE(review): total is hard-coded to 1 here, so every IDF value is
# log10(1 / (df + 1)), which is always negative — confirm this is intended.
idf_dict = get_idf_dict(all_dict, 1)
print(all_dict)
# Write the IDF dictionary to key.txt, one "word value" pair per line.
# UTF-8 is required so jieba can read the file back later.
with open('key.txt', 'w', encoding='utf-8') as fw:
    for k in idf_dict:
        if k != '\n':
            print(k)
            fw.write(k + ' ' + idf_dict[k] + '\n')  # fw.write() dumps the dict line by line


corpus_path = './text'  # corpus root, organized by category
seg_path = './text'  # directory for the post-segmentation corpus

path = "./text"  # folder to scan
catelist= os.listdir(path)  # NOTE(review): only used by the commented-out batch loop below

#
# print(catelist)
# # 获取每个目录下所有的文件
# for mydir in catelist:
#     class_path = corpus_path + mydir + "/"  # 拼出分类子目录的路径
#     # print(class_path)
#     seg_dir = seg_path + mydir + "/"  # 拼出分词后语料分类目录
#     if not os.path.exists(seg_dir):  # 是否存在目录，如果没有创建
#         os.makedirs(seg_dir)
#     # print(seg_dir)
#     file_list = os.listdir(class_path)  # 获取class_path下的所有文件
#     for file_path in file_list:  # 遍历类别目录下文件
#         fullname = class_path + file_path  # 拼出文件名全路径
#         # print(fullname)
#         content = readfile('./text/nnn.txt').strip()  # 读取文件内容
#
#         outstr = get_cut_word(content)  # 为文件内容分词
#         savefile(seg_dir + file_path, "".join(outstr))  # 将处理后的文件保存到分词后语料目录
#
#         # 计算包含 word 的文档的个数
#         all_dict = get_all_dict(outstr, all_dict)
#
#     # 获取idf_dict字典
#     idf_dict = get_idf_dict(all_dict, total)
#     print(idf_dict)
#     # 保存为txt，这里必须要'utf-8'编码，不然jieba不识别。
#     with open('key.txt', 'w', encoding='utf-8') as fw:
#         for k in idf_dict:
#             if k != '\n':
#                 print(k)
#                 fw.write(k + ' ' + idf_dict[k] + '\n')  # fw.wirte()一行行把字典写入txt。

