import pandas as pd
import re
import jieba

# Read the novel file. sep='aaa' is a deliberate trick: the separator never
# occurs in the text, so every physical line becomes one whole row in the
# single 'txt' column. The file is GBK-encoded; engine='python' is required
# for a multi-character separator.
raw = pd.read_csv("金庸-射雕英雄传txt精校版.txt",
                  names=['txt'], sep='aaa', encoding="GBK", engine='python')

# Helper features used below to detect chapter-heading lines
def m_head(tmpstr):
    """Return the first character of *tmpstr*, or "" for an empty string."""
    return tmpstr[0] if tmpstr else ""

def m_mid(tmpstr):
    """Return the index of the substring "回 " in *tmpstr*, or -1 if absent."""
    try:
        return tmpstr.index("回 ")
    except ValueError:
        return -1

# Precompute the three per-line features the chapter detector reads:
# first character, position of "回 ", and line length.
raw['head'] = raw['txt'].apply(m_head)
raw['mid'] = raw['txt'].apply(m_mid)
raw['len'] = raw['txt'].str.len()


# Chapter detection: a heading line starts with "第", contains "回 ", and is
# short (< 30 chars).  chapnum counts headings seen so far; rows before the
# first heading are chapter 0, and the appendix line ("附录一：成吉思汗家族",
# appearing after the 40th chapter) resets everything that follows to 0.
chapnum = 0
chap_ids = []
for head, mid, length, txt in zip(raw['head'], raw['mid'], raw['len'], raw['txt']):
    if head == "第" and mid > 0 and length < 30:
        chapnum += 1
    if chapnum >= 40 and txt == "附录一：成吉思汗家族":
        chapnum = 0
    chap_ids.append(chapnum)
# One vectorized assignment replaces the original per-row raw.loc[i, 'chap']
# writes, which were O(n) each and forced the column to float64.
raw['chap'] = chap_ids
# Drop the helper columns now that chapters are assigned.
raw.drop(columns=['head', 'mid', 'len'], inplace=True)


# Select the paragraphs of chapter 1 for the segmentation experiments below.
tmpchap = raw[raw['chap'] == 1].copy()
tmpchap.reset_index(drop=True, inplace=True)
tmpchap['paraidx'] = tmpchap.index  # paragraph index within the chapter
# tolist() replaces the original manual index-and-append loop (PERF401).
tmpparas = tmpchap['txt'].tolist()

# The longest paragraph of the chapter is used as the sample text for all
# three segmentation runs below.  max(key=len) replaces the original
# hand-rolled argmax (length list + index lookup).
longest_para = max(tmpparas, key=len)
# print(longest_para)

# Baseline: segment the paragraph with jieba, no stopword filtering.
text = jieba.lcut(longest_para)
print(f"不使用停用词文件的分词结果如下：{text}")


# Segment with a stopword list.
# NOTE(review): the original rebound `longest_para` to the filtered result,
# so the Sogou-dictionary section below re-segmented already-segmented,
# space-joined text instead of the real paragraph.  Build the result in a
# separate variable and leave `longest_para` intact.
with open('停用词.txt', encoding='UTF-8') as f:  # close the file when done
    stopwords = {line.strip() for line in f}     # set: O(1) membership tests
sens_list = jieba.lcut(longest_para)
# Same output shape as before: a leading space, then "word " per kept token.
# join() replaces the original quadratic `+=` string building.
filtered = " " + "".join(word + " " for word in sens_list
                         if word not in stopwords and word != '\t')
print("使用停词表的分词结果如下： " + filtered)

# Segment with the stopword list plus the Sogou wuxia-novel user dictionary,
# which teaches jieba the genre-specific names and terms.
jieba.load_userdict("搜狗武侠小说词库.txt")
with open('停用词.txt', encoding='UTF-8') as f:  # close the file when done
    stopwords = {line.strip() for line in f}     # set: O(1) membership tests
sens_list = jieba.lcut(longest_para)
# Same output shape as before: a leading space, then "word " per kept token.
# join() replaces the original quadratic `+=` string building.
result = " " + "".join(word + " " for word in sens_list
                       if word not in stopwords and word != '\t')
print("使用停词表和搜狗词库的分词结果如下： " + result)

