import re
from pandas import DataFrame
import math
import jieba
import jieba.analyse
import pandas as pd

# Load the novel one paragraph per row: sep='aaa' never occurs in the text, so
# each line lands intact in the single 'txt' column; the file is GBK-encoded.
raw = pd.read_csv("./金庸-射雕英雄传txt精校版.txt",names=['txt'], sep='aaa', encoding="gbk", engine='python')
# Pre-compute helper variables used for chapter-heading detection below.
def m_head(tmpstr):
    """Return the first character of *tmpstr* ('' when empty).

    Used to spot chapter headings, which start with "第"; slicing (rather
    than indexing) keeps it safe for empty paragraphs.
    """
    leading = tmpstr[:1]
    return leading
def m_mid(tmpstr):
    """Return the index of the marker "回 " in *tmpstr*, or -1 when absent.

    A positive index indicates the paragraph looks like a chapter heading
    ("第...回 <title>").
    """
    marker = "回 "
    return tmpstr.find(marker)
# Helper columns used to recognise chapter-heading paragraphs.
raw['head'] = raw.txt.apply(m_head)
raw['mid'] = raw.txt.apply(m_mid)
raw['len'] = raw.txt.apply(len)
# Walk every paragraph in order and tag it with its running chapter number.
current_chap = 0
for row_idx, (head_ch, mid_pos, para_len, para_txt) in enumerate(
        zip(raw['head'], raw['mid'], raw['len'], raw['txt'])):
    # A short line that starts with "第" and contains "回 " is a chapter heading.
    if head_ch == "第" and mid_pos > 0 and para_len < 30:
        current_chap += 1
    # After the 40 regular chapters the appendix starts; reset the counter there.
    if current_chap >= 40 and para_txt == "附录一：成吉思汗家族":
        current_chap = 0
    raw.loc[row_idx, 'chap'] = current_chap
# The helper columns have served their purpose; drop them.
for helper_col in ('head', 'mid', 'len'):
    del raw[helper_col]
# Select one chapter for keyword analysis.
tmpchap = raw[raw['chap'] == 4].copy()
tmpchap['paraidx'] = tmpchap.index
new_chap = tmpchap['txt']
# NOTE(review): str(Series) yields the pandas repr (index column, truncation,
# dtype footer), NOT the joined paragraph text — probably ''.join(new_chap)
# was intended; confirm before relying on the sentences produced downstream.
new_case = str(new_chap)
def boom(para):
    """Split a Chinese paragraph into a list of sentences.

    A newline is inserted after each sentence-ending punctuation mark
    (，。！？? as well as "......" and "……", optionally followed by a
    closing quote ”’), then the text is split on those newlines.

    :param para: str, raw paragraph text
    :return: list of sentence strings
    """
    # Raw strings so the regex escapes (\?, \.) are not mangled by the Python
    # string parser (non-raw '\?' emits a SyntaxWarning on Python 3.12+).
    para = re.sub(r'([，。！？\?])([^”’])', r"\1\n\2", para)   # break after ，。！？?
    para = re.sub(r'(\.{6})([^”’])', r"\1\n\2", para)          # break after ASCII ellipsis "......"
    para = re.sub(r'(…{2})([^”’])', r"\1\n\2", para)           # break after Chinese ellipsis "……"
    # Sentence end immediately followed by a closing quote: break after the quote.
    para = re.sub(r'([。！？\?][”’])([^，。！？\?])', r'\1\n\2', para)
    para = para.rstrip()  # drop a trailing newline so split() yields no empty tail
    return para.split("\n")
# Split the selected chapter text into sentences and strip inner spaces.
sents = boom(new_case)
sents_new = [i.replace(' ','') for i in sents]
# NOTE(review): 'chap' is hard-coded to 7 here although tmpchap above was
# filtered with chap == 4 — confirm which chapter number is intended.
data = {'txt':sents_new,'chap':7}
new_data = DataFrame(data)
'''得到的章节'''
# Flatten the chapter DataFrame to plain rows: [txt, chap, paraidx] per row.
list_tmpchap = tmpchap.values.tolist()
def clear():
    """Tokenize the selected chapter and drop stop words.

    Reads the stop-word file (sep='aaa' so each line is one entry), joins the
    paragraph texts of the module-level ``list_tmpchap`` into a single string,
    segments it with jieba and filters the stop words out.

    :return: list of tokens with stop words removed
    """
    tmpdf = pd.read_csv('../TF-IDF/停用词.txt',
                        names=['w'], sep='aaa', encoding='utf-8', engine='python')
    # Set membership is O(1) per token vs. the original per-token list scan.
    stop_set = set(tmpdf.w)
    # BUG FIX: jieba.cut() requires a string; list_tmpchap is a list of
    # [txt, chap, paraidx] rows, so join the text column (row[0]) first —
    # the original call passed the list itself and raised inside jieba.
    full_text = ''.join(str(row[0]) for row in list_tmpchap)
    case3 = [w for w in jieba.cut(full_text) if w not in stop_set]
    return case3
def CreatWord(mystopword):
    """
    Build a stop-word list from a text file.

    :param mystopword: str path to a UTF-8 stop-word txt file, one word per line
    :return: list of stripped stop words plus extra punctuation/whitespace entries
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(mystopword, 'r', encoding='utf-8') as f:
        stopwords = [line.strip() for line in f]
    # BUG FIX: list.append() accepts exactly one argument — the original
    # passed several and raised TypeError.  extend() adds them all.
    stopwords.extend(['‘', '@', '', '!', '…'])
    stopwords.extend(['\n', '\r\n', '\u3000', '\r'])
    return stopwords
def CreatIdf():
    '''
    Build an IDF corpus file (idf.txt.big) from the tokenized text.

    case3: the tokenized article returned by clear()
    NOTE(review): clear() returns individual tokens, yet the loop below treats
    each element as a whole document and splits it on spaces — confirm the
    intended granularity of the "document" unit.
    '''
    case3=clear()
    for i in range(len(case3)):
        case3[i] = case3[i].replace('\n', '')

    Idf_dic = {}
    number_chapter = len(case3)# number of "documents" — the IDF numerator

    # Document frequency: for each word longer than one character, count how
    # many documents contain it (set() de-duplicates within a document).
    for i in range(len(case3)):
        new_chapter = case3[i].split(' ')
        for word in set(new_chapter):
            if len(word) > 1:
                Idf_dic[word] = Idf_dic.get(word, 0) + 1

    for k, v in Idf_dic.items():
        p = '%.10f' % (math.log(number_chapter / (1.0 + v)))  # smoothed IDF: log(N / (1 + df))
        if k > u'\u4e00' and k <= u'\u9fa5':  # NOTE(review): lexicographic string compare — effectively checks only the first character's CJK range, not that every character is Chinese; confirm
            Idf_dic[k] = p

    # Emit one "word idf" pair per line — the format jieba's set_idf_path expects.
    with open('idf.txt.big', 'w', encoding='utf-8') as f:
        for k in Idf_dic:
            if k != '\n':
                f.write(k + ' ' + str(Idf_dic[k]) + '\n')  # write to the txt file

def Keyword(list_tmpchap):
    """Extract and print the top-10 TF-IDF keywords for one chapter.

    :param list_tmpchap: expected to expose a ``.txt`` column (DataFrame-like).
        NOTE(review): the module-level ``list_tmpchap`` passed from __main__ is
        a plain list (``tmpchap.values.tolist()``) and has no ``.txt``
        attribute — confirm whether the DataFrame ``tmpchap`` was meant.
    """
    # Apply a custom user dictionary to improve segmentation quality
    jieba.load_userdict('./金庸小说词库.txt') # path of the custom dictionary
    # Apply the stop-word list directly inside the TF-IDF computation
    jieba.analyse.set_stop_words('./停用词.txt')
    # Extract the keywords (word, weight) pairs
    TFres = jieba.analyse.extract_tags(list_tmpchap.txt[1], withWeight = True)
    print((TFres[:10]))
    # Use the custom TF-IDF frequency file
    # NOTE(review): set AFTER extract_tags, so it does not affect the
    # extraction above — confirm the intended call order.
    jieba.analyse.set_idf_path("./idf.txt.big")

if __name__ == '__main__':
    CreatIdf()# tokenize the text and build the custom IDF corpus file
    Keyword(list_tmpchap)# extract and print the chapter keywords