import re
import tempfile

import jieba  # BUG FIX: jieba is used throughout this file but was never imported
import pandas as pd
# Extract the requested chapter from the novel and save it to its own file.
# NOTE(review): input encoding is platform-default — presumably the source
# text is UTF-8; confirm on Windows where the default is GBK.
file = open('金庸-射雕英雄传txt精校版.txt')
i = input("请输入第几回（例如：1）：")
keyStart = '第' + i + '回'
# Chapter 40 is the last one: it runs up to the appendix marker
# instead of ending at the first blank line.
if i == '40':
    keyEnd = '附录一'
else:
    keyEnd = '\n' + '\n'
buff = file.read()
file.close()  # BUG FIX: the handle was never closed
# re.S lets '.' span newlines so the whole chapter body is captured.
pat = re.compile(keyStart + '(.*?)' + keyEnd, re.S)
result = keyStart + pat.findall(buff)[0]
# BUG FIX: the original opened a tempfile.TemporaryFile and immediately
# leaked the handle by rebinding tmp_f; write the chapter file directly.
with open(keyStart + ".txt", 'w', encoding='GBK') as tmp_f:
    tmp_f.write(result)
raw_list = []
# Re-read the chapter file: once as a DataFrame (one paragraph per row),
# once as raw text for whole-chapter sentence splitting.
f = open(keyStart + ".txt", 'r')

raw = pd.read_csv(keyStart + ".txt",
                  names=["段落", "段落号"],
                  sep=' ',
                  encoding="GBK",
                  engine='python')
raw['段落号'] = raw.index
# Split the chapter into sentences on Chinese end punctuation,
# keeping a trailing closing quote attached to its sentence.
fen_ju = re.findall('(.*?[？。！；：](’|“)?)', f.read())
list_ju = [a[0] for a in fen_ju]
list_ju_index = list(range(1, len(list_ju) + 1))
list_len_juzi = [len(b) for b in list_ju]
# Split each paragraph (rows 1..end; row 0 is the chapter heading)
# into its own list of sentences.
# NOTE(review): 'all' shadows the builtin, but downstream code reads the
# variable under this name, so it is kept; renaming needs a wider change.
all = [
    [k[0] for k in re.findall('(.*?[？。！；：](’|“)?)', raw['段落'][j])]
    for j in range(1, len(raw['段落号']))
]
# Assign a paragraph number to each sentence.
n = 0
duan_index = []
for h in range(0, len(list_ju)):
    if list_ju[h] in all[n]:
        # Rebinding the loop variable does not affect iteration; it only
        # makes the appended value the 1-based sentence position.
        h = h + 1
        duan_index.append(h)
    else:
        # Sentence not found in the current paragraph: advance the
        # paragraph cursor. NOTE(review): this skips the sentence at h
        # entirely — presumably intentional, but verify the algorithm.
        n = n + 1
# Append one marker per remaining paragraph boundary, then sort.
for c in range(1, len(all)):
    duan_index.append(c)
duan_index.sort()
# Build the complete data frame of sentences.
list_duan_index = [i + 1 for i in duan_index]
dic1 = {'句子': list_ju,
        '句子编号': list_ju_index,
        '段落号': list_duan_index,
        '句子长度': list_len_juzi}
# BUG FIX: pd.rawFrame does not exist; the intended constructor is
# pd.DataFrame — the original raised AttributeError here.
sentences = pd.DataFrame(dic1)
f = open(keyStart + ".txt", 'r')
conten = f.readlines()


def Remove_stop_words(tmpstr):
    """Cut *tmpstr* with jieba and drop every token listed in 停用词.txt.

    Returns the surviving tokens re-joined into a single string.
    Raises FileNotFoundError if 停用词.txt is missing.
    """
    # sep='aaa' forces one whole line per row, so each line is one stop word.
    tmpdf = pd.read_csv('停用词.txt', names=['w'], sep='aaa', encoding='utf-8', engine='python')
    # A set gives O(1) membership tests; the original scanned a list per token.
    stop_words = set(tmpdf.w)
    # str.join instead of the original quadratic '+=' concatenation loop.
    return "".join(w for w in jieba.cut(tmpstr) if w not in stop_words)


def Custom_Dictionary(file_ame):
    """Point jieba at a custom main dictionary file.

    file_ame: path to the dictionary file (parameter name kept as-is,
    typo included, so keyword callers keep working).
    """
    # The original bound the path to a local named 'dict', shadowing the
    # builtin; pass it straight through instead.
    jieba.set_dictionary(file_ame)


def main1():
    """Segment chapter one using the Sogou cell lexicon and stop-word list."""
    # NOTE(review): Read_target_txt / Read_target_chapter are not defined
    # anywhere in this file — presumably meant to be imported; confirm.
    print("选取第一回的文字，应用搜狗的细胞词库和停用词表的分词结果")
    chapter = Read_target_chapter("金庸-射雕英雄传txt精校版.txt")
    cleaned = Read_target_txt(chapter)
    Custom_Dictionary("金庸小说词库.txt")
    tokens = jieba.lcut(Remove_stop_words(cleaned))
    print("/".join(tokens))


def main2():
    """Compare segmentations of the longest paragraph in chapter one.

    Prints four variants: no lexicon, the novel lexicon, the place-name
    lexicon, and finally segmentation without stop-word removal.
    """
    # NOTE(review): Read_target_chapter is not defined in this file —
    # presumably imported elsewhere; confirm before running.
    chapter = Read_target_chapter("金庸-射雕英雄传txt精校版.txt")
    lengths = [len(p) for p in chapter.txt]
    # Pick the single longest paragraph of the chapter.
    longest = chapter.txt[lengths.index(max(lengths))]

    print("\n不使用词库分词结果")
    print("/".join(jieba.lcut(Remove_stop_words(longest))))

    print("\n使用金庸小说词库分词结果")
    Custom_Dictionary("金庸小说词库.txt")
    print("/".join(jieba.lcut(Remove_stop_words(longest))))

    print("\n使用金庸地名词库分词结果")
    Custom_Dictionary("金庸地名.txt")
    print("/".join(jieba.lcut(Remove_stop_words(longest))))

    print("\n不去除停用词分词结果")
    Custom_Dictionary("金庸小说词库.txt")
    print("/".join(jieba.lcut(longest)))


# Script entry point: run both segmentation demos back to back.
if __name__ == "__main__":
    main1()
    main2()