import requests
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import re
from collections import Counter

# Configure matplotlib so Chinese text renders correctly in titles/labels.
import matplotlib
matplotlib.rcParams['font.family'] = ['SimHei']  # SimHei (黑体) provides CJK glyphs
# With a CJK font the Unicode minus sign (U+2212) has no glyph and shows as a
# box; fall back to the ASCII hyphen for axis tick labels.
matplotlib.rcParams['axes.unicode_minus'] = False

# Part 1: fetch the web page.
print("正在爬取网页...")
url = 'https://www.scct.cn/jtxxxy/info/1275/2514.htm'
# A browser-like User-Agent avoids trivial bot blocking.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
}

# timeout prevents the script from hanging forever on a dead connection;
# raise_for_status surfaces 4xx/5xx responses instead of parsing an error page.
response = requests.get(url=url, headers=headers, timeout=10)
response.raise_for_status()
response.encoding = 'utf-8'  # force decoding as UTF-8 (the page's declared charset)
html_doc = response.text

# Part 2: parse the HTML and extract the article's paragraph text.
print("正在解析网页内容...")
soup = BeautifulSoup(html_doc, 'html.parser')
article = soup.find('div', class_='v_news_content')

paragraphs = []
if article:
    # Preferred case: the site's dedicated article container.
    paragraphs = [p.text for p in article.find_all('p')]
else:
    # Fallback 1: the first div whose class name mentions "content".
    content_div = next(
        (
            div
            for div in soup.find_all('div', class_=True)
            if div.get('class')
            and any('content' in c.lower() for c in div.get('class'))
        ),
        None,
    )

    if content_div:
        paragraphs = [p.text for p in content_div.find_all('p')]
    else:
        # Fallback 2: every paragraph on the page, skipping very short ones.
        paragraphs = [
            p.text for p in soup.find_all('p') if len(p.text.strip()) > 10
        ]

# One newline after each paragraph, matching the original accumulation.
text_content = "".join(p + "\n" for p in paragraphs)

print(f"提取的文本内容长度: {len(text_content)} 字符")

# Part 3: extract Chinese character bigrams and count their frequencies.
print("正在进行分词统计...")
# Replace every non-Chinese character with a space so that bigrams never
# span punctuation, digits, whitespace or Latin text.
text = re.sub(r'[^\u4e00-\u9fa5]', ' ', text_content)

# Extract ALL two-character sequences. A zero-width lookahead with a capturing
# group yields overlapping bigrams; the previous pattern r'[\u4e00-\u9fa5]{2}'
# consumed both characters per match, so it only found every other bigram
# (e.g. "交通信息" -> 交通/信息 but never 通信) and dropped the final
# character of odd-length runs — contradicting "提取所有双字词".
two_char_words = re.findall(r'(?=([\u4e00-\u9fa5]{2}))', text)
print(f"找到 {len(two_char_words)} 个双字词")
print(f"不同双字词数量: {len(set(two_char_words))}")

# Tally occurrences of each bigram.
word_counter = Counter(two_char_words)

# Show the ten most frequent bigrams.
print("\n双字词频统计结果（前10个）:")
top_words = word_counter.most_common(10)
for word, count in top_words:
    print(f"{word}: {count}次")

# Persist the full frequency table, most frequent first.
with open('word_counts.txt', 'w', encoding='utf-8') as f:
    for word, count in word_counter.most_common():
        f.write(f"{word}: {count}次\n")
print("词频统计结果已保存到 word_counts.txt")

# Part 4: visualize the five most frequent bigrams as a bar chart.
print("正在生成可视化图表...")
top_five = word_counter.most_common(5)
x = [word for word, _ in top_five]
y = [count for _, count in top_five]

# One pastel color per bar.
plt.figure(figsize=(10, 6))
colors = ['#FF9999', '#66B3FF', '#99FF99', '#FFCC99', '#FF99CC']
plt.bar(x, y, color=colors)
plt.xticks(rotation=45)

# Title and axis labels (rendered with the CJK font configured above).
plt.title("网页正文中出现频率最高的5个双字词")
plt.xlabel("词语")
plt.ylabel("出现次数")
plt.tight_layout()

# Annotate each bar with its count, just above the top edge.
for idx, count in enumerate(y):
    plt.text(idx, count + 0.5, str(count), ha='center')

# Write the chart to disk before displaying it.
plt.savefig('word_frequency.png')
print("图表已保存为 word_frequency.png")

plt.show()