import requests
from bs4 import BeautifulSoup
import jieba.posseg as pseg
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# Extract a chapter's title and body text from a biquge novel page.
def get_bqg_novel(soup):
    """Return (title, text) parsed from a chapter page.

    soup: BeautifulSoup tree of a chapter HTML page.
    Returns ("爬取失败", "") when the content container is missing.
    """
    content_div = soup.find('div', id='content')
    bookname_div = soup.find('div', class_='bookname')
    title, novel_text = "", ""
    if bookname_div:
        h1 = bookname_div.find('h1')
        # Guard: a bookname block without an <h1> heading would otherwise
        # raise AttributeError on .text.
        if h1:
            title = h1.text
        # title = title.strip() + "\n"  # strip leading/trailing whitespace
    if content_div:
        p_tags = content_div.find_all('p')
        # Skip the first <p> (advertisement); join the remaining paragraphs.
        novel_text = ''.join(p.text for p in p_tags[1:])
    else:
        print("未找到<div id=content>")
        return "爬取失败", ""
    # Collapse blank lines and drop spaces.
    novel_text = novel_text.replace('\n\n', '').replace(' ', '')
    # novel_text = novel_text.replace("\r\n", '')  # enable to also strip line breaks when saving
    return title, novel_text

# Save text to a file; root is the save directory, path the file path,
# text the content to write (appended if the file already exists).
def save_novel(root, path, text):
    try:
        import os
        # exist_ok avoids the check-then-create race of the naive
        # "if not exists: makedirs" pattern.
        os.makedirs(root, exist_ok=True)
        existed = os.path.exists(path)
        # "ab" creates the file when missing, so a single open call
        # covers both the first write and subsequent appends.
        with open(path, "ab") as f:
            f.write(text.encode())
        if existed:
            print("已追加到{}".format(path))
        else:
            print("已保存到{}".format(path))
    except Exception as e:
        # Best-effort: report and continue rather than abort the crawl.
        print("保存过程异常:", e)



# 获取章数
n = 3
# 保存路径
root = './novel/'
path = root + "3.txt"
list_names = []

for i in range(n):
    url = "https://www.biquke.vip/book/209/{}.html".format(i + 139978)  # 笔趣阁：诡秘之主
    # url = "https://www.biquke.vip/book/200/{}.html".format(i + 133810) # 笔趣阁：遮天

    # 获取HTML文本
    r = requests.get(url)
    r.encoding = 'utf-8'  # 无论原来用什么编码，都改成utf-8
    # 提取内容
    soup = BeautifulSoup(r.text, 'lxml')
    title, novel_text = get_bqg_novel(soup)
    # 保存这一章小说
    save_novel(root, path, title + novel_text)
    # 提取人物姓名
    words = pseg.cut(novel_text)
    words = [word.word for word in words if word.flag == 'nr' and len(word.word) > 1]
    list_names.extend(words)



def name_filter(words):
    """Filter and count extracted person names.

    Normalizes the alias "克莱恩" to the full name "克莱恩.莫雷蒂",
    drops tokens known to be POS-tagger false positives, and returns
    a dict of {name: occurrence count}.
    """
    # Tokens wrongly tagged as person names by jieba.
    excludes = {"福生玄", "毛巾", "黄铜", "张开", "莫雷蒂", "墨水瓶", "盏灯", "小丑", "占卜师"}
    counts = {}
    for word in words:
        # Map the protagonist's short name to his full name.
        # (The original dead `elif word == ''` branch duplicated the
        # else branch and has been removed.)
        rword = "克莱恩.莫雷蒂" if word == "克莱恩" else word
        counts[rword] = counts.get(rword, 0) + 1
    # Drop false-positive "names" after counting.
    for bad in excludes:
        counts.pop(bad, None)
    return counts

# Filter the extracted names and count occurrences per person
counts = name_filter(list_names)

# Pick the 10 most frequent character names
print("前{}章人物出现次数前10的人物".format(n))
top_names = dict(Counter(counts).most_common(10))

# Print the top-10 characters and their counts
for name, count in top_names.items():
    print(f"人物姓名：{name}，出现次数：{count}")

# Build the word cloud from the name frequencies
wc = WordCloud(font_path="msyh.ttc", width=800, height=400, background_color="white")
wc.generate_from_frequencies(top_names)

# Display the word cloud
plt.figure(figsize=(10, 5))
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show()