import requests
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import re
from collections import Counter

# Configure matplotlib so Chinese (CJK) text renders correctly in the chart.
import matplotlib
matplotlib.rcParams['font.family'] = ['SimHei']  # SimHei (黑体) provides CJK glyphs
# CJK fonts usually lack the Unicode minus sign (U+2212), which would render as
# an empty box on negative axis ticks; fall back to the ASCII hyphen instead.
matplotlib.rcParams['axes.unicode_minus'] = False

# --- Part 1: fetch the web page ---
print("正在爬取网页...")
url = 'https://www.scct.cn/jtxxxy/info/1275/2514.htm'
headers = {
    # Browser-like User-Agent so the site does not reject the scripted request.
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
}

# A timeout is required: requests.get without one can block forever on a
# stalled connection. 10 s covers both connect and read phases.
response = requests.get(url=url, headers=headers, timeout=10)
response.raise_for_status()  # fail fast on HTTP errors (404/500/...) instead of parsing an error page
response.encoding = 'utf-8'  # the site serves UTF-8; override requests' header-based guess
html_doc = response.text
print(f"网页内容长度: {len(html_doc)} 字符")

# Keep a copy of the raw HTML for debugging / offline re-parsing.
with open('raw_html.html', 'w', encoding='utf-8') as f:
    f.write(html_doc)
print("原始HTML已保存到 raw_html.html")

# --- Part 2: parse the page and extract the article text ---
print("正在解析网页内容...")
soup = BeautifulSoup(html_doc, 'html.parser')
print(f"页面标题: {soup.title.string if soup.title else '无标题'}")

# Try progressively more generic containers for the article body; fall back to
# the whole document if none of the site-specific wrappers are present.
article = soup.find('div', class_='v_news_content')
if not article:
    print("未找到 div.v_news_content，尝试其他选择器...")
    article = soup.find('div', id='content')
if not article:
    print("未找到 div#content，尝试其他选择器...")
    article = soup.find('article')
if not article:
    print("未找到 article，尝试获取所有段落...")
    article = soup  # last resort: scan every <p> in the document

paragraphs = article.find_all('p')
print(f"找到了 {len(paragraphs)} 个段落")

# Collect paragraph texts once and join at the end — avoids the quadratic
# str += pattern while producing the same bytes (one trailing '\n' each).
para_texts = [p.text.strip() for p in paragraphs]
for text in para_texts:
    print(f"段落: {text[:50]}...")
text_content = "".join(text + "\n" for text in para_texts)

print(f"提取的文本内容长度: {len(text_content)} 字符")

# Persist the extracted text for later inspection.
with open('extracted_text.txt', 'w', encoding='utf-8') as f:
    f.write(text_content)
print("提取的文本已保存到 extracted_text.txt")

# --- Part 3: crude word segmentation and counting ---
print("正在进行分词统计...")
words = []

# How many individual CJK characters appear in the extracted text.
chinese_chars = re.findall(r'[\u4e00-\u9fa5]', text_content)
print(f"中文字符数量: {len(chinese_chars)}")

# Runs of two or more consecutive Chinese characters.
matches = re.findall(r'[\u4e00-\u9fa5]{2,}', text_content)
print(f"找到 {len(matches)} 个连续中文字符串")

# Slide a window over each run and emit every 2-, 3- and 4-character n-gram
# that fits. A 2-character run contributes exactly itself (only the one
# 2-gram fits), so no special case is needed.
for run in matches:
    run_len = len(run)
    for start in range(run_len - 1):
        for size in (2, 3, 4):
            if start + size <= run_len:
                words.append(run[start:start + size])

print(f"总共提取了 {len(words)} 个词语")
print(f"不同词语数量: {len(set(words))}")

# Tally how often each extracted n-gram occurs.
word_counter = Counter(words)

# Show the ten most frequent n-grams on stdout.
print("\n词频统计结果（前10个）:")
for word, count in word_counter.most_common(10):
    print(f"{word}: {count}次")

# Dump the complete frequency table to disk, most frequent first.
with open('word_counts.txt', 'w', encoding='utf-8') as f:
    f.writelines(
        f"{word}: {count}次\n" for word, count in word_counter.most_common()
    )
print("词频统计结果已保存到 word_counts.txt")

# --- Part 4: visualize the five most frequent n-grams ---
print("正在生成可视化图表...")
top_words = word_counter.most_common(5)
if not top_words:
    print("没有找到足够的词语进行可视化")
    # exit() is an interactive helper injected by the `site` module and may be
    # absent (python -S, frozen apps); raise SystemExit directly instead.
    raise SystemExit(1)

labels = [word for word, _ in top_words]
counts = [count for _, count in top_words]

# One bar per word, each in its own pastel color.
plt.figure(figsize=(10, 6))
colors = ['#FF9999', '#66B3FF', '#99FF99', '#FFCC99', '#FF99CC']
plt.bar(labels, counts, color=colors)
plt.xticks(rotation=45)

plt.title("网页正文中出现频率最高的5个词")
plt.xlabel("词语")
plt.ylabel("出现次数")

# Annotate each bar with its count, just above the bar top.
for i, count in enumerate(counts):
    plt.text(i, count + 0.5, str(count), ha='center')

# Compute the layout after all artists (rotated ticks, annotations) exist so
# nothing is clipped in the saved figure.
plt.tight_layout()

plt.savefig('word_frequency.png')
print("图表已保存为 word_frequency.png")

# plt.show()  # uncomment for an interactive window