import requests
from bs4 import BeautifulSoup
import jieba
import matplotlib.pyplot as plt
import re
from collections import Counter

# Configure matplotlib so Chinese glyphs render correctly.
import matplotlib
matplotlib.rcParams['font.family'] = ['SimHei']  # SimHei (黑体) covers CJK glyphs
# With a CJK font active, the default Unicode minus (U+2212) has no glyph and
# renders as an empty box; fall back to the ASCII hyphen for tick labels.
matplotlib.rcParams['axes.unicode_minus'] = False

# Part 1: download the article page and keep its raw HTML.
page_url = 'https://www.scct.cn/jtxxxy/info/1275/2514.htm'
request_headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
}

resp = requests.get(url=page_url, headers=request_headers)
# Force UTF-8 decoding; the site serves UTF-8 but may not declare it in headers.
resp.encoding = 'utf-8'
html_doc = resp.text

# Part 2: parse the HTML and pull the article body text out of the
# 'v_news_content' div, paragraph by paragraph.
soup = BeautifulSoup(html_doc, 'html.parser')
article = soup.find('div', class_='v_news_content')

paragraphs = article.find_all('p') if article else []
pieces = []
for para in paragraphs:
    pieces.append(para.text + "\n")
    print(para.text)
# Join once instead of repeated string concatenation.
text_content = "".join(pieces)

# Part 3: tokenize the article text and count word frequencies.
# Replace every non-CJK character with a space so jieba only sees Chinese text.
text = re.sub(r'[^\u4e00-\u9fa5]', ' ', text_content)

# Segment with jieba.
words = jieba.lcut(text)

# Drop single-character tokens (mostly particles / stopword-like fragments).
valid_words = [word for word in words if len(word) >= 2]

# Count with Counter (already imported, previously unused): one O(n) pass.
# The old `for word in set(...): valid_words.count(word)` loop was O(n^2).
# Counter subclasses dict, so the downstream .items() usage is unchanged.
word_counts = Counter(valid_words)

# Print the ten most frequent words with their counts.
sorted_words = sorted(word_counts.items(), key=lambda item: item[1], reverse=True)
top_ten = sorted_words[:10]
for word, count in top_ten:
    print(f"{word}: {count}次")

# Part 4: bar chart of the five most frequent words.
top_five = sorted_words[:5]
labels = [word for word, _ in top_five]
counts = [count for _, count in top_five]

# One distinct pastel color per bar.
plt.figure(figsize=(10, 6))
bar_colors = ['#FF9999', '#66B3FF', '#99FF99', '#FFCC99', '#FF99CC']
plt.bar(labels, counts, color=bar_colors)
plt.xticks(rotation=45)

# Title and axis labels (Chinese, rendered via the SimHei font set above).
plt.title("网页正文中出现频率最高的5个词")
plt.xlabel("词语")
plt.ylabel("出现次数")
plt.tight_layout()

# Annotate each bar with its count just above the top edge.
for idx, value in enumerate(counts):
    plt.text(idx, value + 0.5, str(value), ha='center')

plt.show()