# 学习课程：中文文本挖掘
# 学习学生：姜浩然

import pandas as pd
import jieba

# Read the whole novel one physical line per row: sep='aaa' is a separator
# that never occurs in the text, so each line lands in a single 'txt' column.
# The source file is GBK-encoded; the python engine is required for a
# multi-character separator.
raw = pd.read_csv(r"D:\python\金庸-射雕英雄传txt精校版.txt",
                  names = ['txt'], sep ='aaa', encoding ="GBK" ,engine='python')
# print(raw)
def m_head(tmpstr):
    """Return the first character of *tmpstr*, or '' when it is empty."""
    if not tmpstr:
        return ""
    return tmpstr[0]

def m_mid(tmpstr):
    """Return the index of the chapter marker "回 " in *tmpstr* (-1 if absent)."""
    marker = "回 "
    return tmpstr.find(marker)

# Per-line features used below to recognise chapter-heading lines.
raw['head'] = raw.txt.apply(m_head)  # first character of the line
raw['mid'] = raw.txt.apply(m_mid)    # position of the "回 " marker, -1 if absent
raw['len'] = raw.txt.apply(len)      # line length in characters

# Chapter detection: a line is treated as a chapter heading when it starts
# with "第", contains the "回 " marker past position 0, and is shorter than
# 30 characters. Every row is then tagged with the current chapter number
# in a new 'chap' column (0 = front matter before chapter 1).
chapnum = 0
for i in range(len(raw)):
    if raw['head'][i] == "第" and raw['mid'][i] > 0 and raw['len'][i] < 30:
        chapnum += 1
    # The appendix after the last (40th) chapter is not part of the novel:
    # reset its rows back to chapter 0 so it is excluded from per-chapter work.
    if chapnum >= 40 and raw['txt'][i] == "附录一：成吉思汗家族":
        chapnum = 0
    raw.loc[i, 'chap'] = chapnum

# Drop the helper columns now that every row carries its chapter number.
raw.drop(columns=['head', 'mid', 'len'], inplace=True)

# 获取某一章节内容
def inputchapter(number):
    """Return the paragraphs of chapter *number* as a fresh DataFrame.

    The result is a copy with a reset index and a 'paraidx' column holding
    the paragraph position within the chapter.
    """
    chap_df = raw.loc[raw['chap'] == number].copy()
    chap_df = chap_df.reset_index(drop=True)
    chap_df['paraidx'] = chap_df.index
    return chap_df

# 使用jieba进行分词
def cutwords(number):
    """Segment every paragraph of chapter *number* with jieba.

    Returns a flat list of tokens from all of the chapter's paragraphs.
    """
    tokens = []
    temp = inputchapter(number)
    # NOTE(review): this loads the *stop-word* file as a jieba user
    # dictionary, which ADDS those words to the segmentation vocabulary
    # rather than removing them — the actual stop-word filtering happens
    # later in stop_word(). Kept for behavioral compatibility; confirm
    # this is intended. The file is now closed deterministically, and the
    # builtin name `dict` is no longer shadowed.
    with open(r"D:\python\停用词.txt", encoding='utf-8') as user_dict_file:
        jieba.load_userdict(user_dict_file)
    # Start at 1: row 0 is presumably the chapter-title line — TODO confirm.
    for i in range(1, len(temp['txt'])):
        tokens.extend(jieba.cut(temp['txt'][i]))
    return tokens

# 使用停用词对章节进行分词
def stop_word(number):
    """Segment chapter *number* and remove stop words.

    Returns the chapter's token list with every word found in the stop-word
    file filtered out.
    """
    tokens = cutwords(number)
    # The original leaked the file handle; a with-block closes it reliably.
    with open(r"D:\python\停用词.txt", encoding='utf-8') as stop_file:
        # strip() removes the trailing newline left by line iteration.
        stop_set = {line.strip() for line in stop_file}
    # A set gives O(1) membership tests instead of scanning a list per token.
    return [w for w in tokens if w not in stop_set]

if __name__ == '__main__':
    # Ask for a chapter number, then print its stop-word-filtered tokens.
    chapter = int(input("请输入您想看的章节:"))
    cleaned = stop_word(chapter)
    print('使用停用词进行分词:', cleaned)

