# 学习课程：中文文本挖掘
# 学习学生：姜浩然

import jieba
import pandas as pd
import wordcloud
import numpy as np
import matplotlib.pyplot as plt
import imageio
from wordcloud import get_single_color_func
from PIL import Image
import os
class GroupedColorFunc(object):
    """Color function for ``WordCloud.recolor``.

    Words listed in *color_to_words* are painted with their group's
    color; every other word falls back to *default_color*.
    """

    def __init__(self, color_to_words, default_color):   # build per-color lookup tables
        # Pre-compute one (color_func, word_set) pair per color so each
        # lookup is a fast set-membership test.
        self.color_func_to_words = [
            (get_single_color_func(color), set(words))
            for (color, words) in color_to_words.items()
        ]
        self.default_color_func = get_single_color_func(default_color)

    def get_color_func(self, word):
        """Returns a single_color_func associated with the word"""
        for color_func, words in self.color_func_to_words:
            if word in words:
                return color_func
        # No group claimed this word — use the default color.
        return self.default_color_func

    def __call__(self, word, **kwargs):
        return self.get_color_func(word)(word, **kwargs)

# Load the novel as one DataFrame row per physical line.  sep='aaa' is a
# separator that never occurs in the text, so each line lands whole in the
# single 'txt' column; the file is GBK-encoded (common for Chinese txt files).
raw = pd.read_csv(r"D:\python\金庸-射雕英雄传txt精校版.txt",
                  names = ['txt'], sep ='aaa', encoding ="GBK" ,engine='python')
def m_head(tmpstr):
    """Return the first character of *tmpstr*, or '' when it is empty."""
    return tmpstr[0] if tmpstr else ""

def m_mid(tmpstr):
    """Return the index of the chapter marker "回 " in *tmpstr* (-1 if absent)."""
    marker = "回 "
    return tmpstr.find(marker)

# Per-line features used below to recognize chapter-heading lines.
raw['head'] = raw.txt.apply(m_head)   # first character of the line
raw['mid'] = raw.txt.apply(m_mid)     # position of "回 " in the line (-1 if absent)
raw['len'] = raw.txt.apply(len)       # line length in characters

# Tag every line with its chapter number.  A heading is a short line
# (< 30 chars) that starts with "第" and contains "回 " past position 0.
chapnum = 0
for i in range(len(raw)):
    if raw['head'][i] == "第" and raw['mid'][i] > 0 and raw['len'][i] < 30:
        chapnum += 1
    # After all 40 chapters, the appendix line resets the counter so the
    # appendix text is tagged as chapter 0 (i.e. excluded from chapters).
    if chapnum >= 40 and raw['txt'][i] == "附录一：成吉思汗家族":
        chapnum = 0
    raw.loc[i, 'chap'] = chapnum

# 获取某一章节内容
def inputchapter(number):
    """Return the lines of chapter *number* as a fresh DataFrame.

    The result gets a clean 0-based index and a 'paraidx' column that
    numbers the paragraphs within the chapter.
    """
    chap_df = raw[raw['chap'] == number].copy()
    chap_df = chap_df.reset_index(drop=True)
    chap_df['paraidx'] = chap_df.index
    return chap_df

# 使用jieba进行分词
# Segment a chapter with jieba
def cutwords(number):
    """Segment every paragraph of chapter *number* with jieba and return
    the tokens as one flat list.

    NOTE(review): the "user dictionary" loaded here is the stopword file
    (停用词.txt) — confirm this is intended; a dedicated custom-dictionary
    file seems more likely.
    """
    tokens = []
    temp = inputchapter(number)
    # Context manager closes the file (the original leaked the handle);
    # also avoid shadowing the builtin name `dict`.
    with open(r"D:\python\停用词.txt", encoding='utf-8') as user_dict:
        jieba.load_userdict(user_dict)    # custom word list
    # Start at paragraph 1: paragraph 0 is the chapter heading line.
    for i in range(1, len(temp['txt'])):
        tokens.extend(jieba.cut(temp['txt'][i]))
    return tokens   # raw segmentation result, stopwords not yet removed

# 使用停用词对章节进行分词
# Remove stopwords from a chapter's tokens
def stop_word(number):
    """Return the tokens of chapter *number* with stopwords filtered out."""
    tokens = cutwords(number)
    # Context manager closes the file (the original leaked the handle);
    # a set gives O(1) membership tests instead of scanning a list.
    with open(r"D:\python\停用词.txt", encoding='utf-8') as f:
        stopwords = {line.strip() for line in f}
    return [w for w in tokens if w not in stopwords]   # final token list

# 输出人名和地名
# Extract person and place names
def cutnames(number):
    """Return (person_name_tokens, place_name_tokens) for chapter *number*.

    Both lists keep duplicates and original order, matching how often
    each name occurs in the chapter.
    """
    tokens = stop_word(number)

    def _load_wordset(path):
        # One entry per line; context manager closes the file (the
        # original leaked both handles), set gives O(1) membership.
        with open(path, encoding='utf-8') as f:
            return {line.strip() for line in f}

    names_ = _load_wordset(r"D:\python\人名.txt")
    places_ = _load_wordset(r"D:\python\地名.txt")
    name_tokens = [w for w in tokens if w in names_]     # all person-name hits
    place_tokens = [w for w in tokens if w in places_]   # all place-name hits
    return name_tokens, place_tokens

# 绘制词云
# Render and display two word clouds for one chapter:
#   1) grouped colors — person names blue, place names red, others yellow;
#   2) recolored from the reference image *picture*, then saved to *name*.
def drawword(number,picture,name):
    temp = cutnames(number)          # (person names, place names)
    temp1 = stop_word(number)        # all tokens, stopwords removed
    myfont = 'STSONG.TTF'    # font supporting Chinese glyphs
    name_list = [w for w in temp[0]]
    place_list = [w for w in temp[1]]
    text = ' '.join(temp1)           # WordCloud expects space-separated text
    mask = np.array(Image.open("D:\\python\\图片.jpg"))  # shape mask image
    cloudobj = wordcloud.WordCloud(font_path=myfont,
                                   mask=mask,width = 360, height = 180,
    mode = "RGBA", background_color = None).generate(text)
    color_to_words = {
        'blue': name_list,
        'red': place_list
    }
    default_color = 'yellow'  # color for every other word
    grouped_color_func = GroupedColorFunc(color_to_words, default_color)
    cloudobj.recolor(color_func=grouped_color_func)
    # NOTE(review): this save only fires when the mask file is missing —
    # but then Image.open above would already have failed, so this branch
    # looks unreachable; also saving RGBA to .jpg would fail.  Confirm intent.
    if not os.path.exists("D:\\python\\图片.jpg"):
        cloudobj.to_file("D:\\python\\图片.jpg")   # save image
    plt.imshow(cloudobj)
    plt.axis("off")
    plt.show()
    # Second pass: recolor the same cloud from the reference picture's colors.
    imgobj = imageio.imread(picture)
    image_colors = wordcloud.ImageColorGenerator(np.array(imgobj))
    cloudobj.recolor(color_func=image_colors)
    cloudobj.to_file(name)           # persist the image-colored version
    plt.imshow(cloudobj)
    plt.axis("off")
    plt.show()

if __name__ == '__main__':
    # Interactive entry point: ask for a chapter number, then render the
    # word clouds (recolored from 图片1.jpg) and save the result as yellow.png.
    drawword(int(input("请输入你想看的章节:")),"D:\\python\\图片1.jpg","D:\\python\\yellow.png")
