import re
import sys
from collections import Counter

import matplotlib.pyplot as plt
import requests
from bs4 import BeautifulSoup

# Configure matplotlib so Chinese labels render correctly.
import matplotlib
matplotlib.rcParams['font.family'] = ['SimHei']  # SimHei: a CJK-capable font
# SimHei has no glyph for the Unicode minus sign (U+2212); without this
# setting, negative tick labels render as empty boxes.
matplotlib.rcParams['axes.unicode_minus'] = False

# Part 1: fetch the web page.
print("正在爬取网页...")
url = 'https://www.scct.cn/jtxxxy/info/1275/2514.htm'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
}

# `timeout` prevents the script from hanging forever on a dead server;
# `raise_for_status` surfaces HTTP errors (404/500) instead of silently
# parsing an error page as if it were the article.
response = requests.get(url=url, headers=headers, timeout=15)
response.raise_for_status()

# Try UTF-8 first; if replacement characters appear, the page is not
# UTF-8, so fall back to the encoding sniffed from the response body
# (`apparent_encoding` covers GB2312/GBK and anything else detectable).
response.encoding = 'utf-8'
html_doc = response.text
if '�' in html_doc:
    response.encoding = response.apparent_encoding
    html_doc = response.text

print(f"网页内容长度: {len(html_doc)} 字符")
print(f"使用的编码: {response.encoding}")

# Part 2: parse the page and locate the article body.
print("正在解析网页内容...")
soup = BeautifulSoup(html_doc, 'html.parser')
print(f"页面标题: {soup.title.string if soup.title else '无标题'}")

# The article body is the first <div> whose class list contains a name
# with "content" in it (case-insensitive).
content_div = next(
    (div for div in soup.find_all('div', class_=True)
     if any('content' in cls.lower() for cls in div.get('class'))),
    None,
)
if content_div is not None:
    print(f"找到内容区域: {content_div.get('class')}")
else:
    # No obvious content area: scan the entire document instead.
    print("未找到内容区域，将使用整个页面")
    content_div = soup

# Collect every paragraph inside the chosen region.
paragraphs = content_div.find_all('p')
print(f"找到了 {len(paragraphs)} 个段落")

# Pull the visible text out of the paragraphs.
# Collect qualifying paragraphs in a list and join once at the end —
# repeated `str +=` in a loop is quadratic in the total text size.
kept_paragraphs = []
for p in paragraphs:
    text = p.get_text().strip()
    if text and len(text) > 5:  # skip very short paragraphs (dates, separators, ...)
        kept_paragraphs.append(text)
        print(f"段落: {text[:50]}...")
# Each kept paragraph is terminated with a newline, matching the
# original one-paragraph-per-line file layout.
text_content = "".join(t + "\n" for t in kept_paragraphs)

print(f"提取的文本内容长度: {len(text_content)} 字符")

# Persist the extracted text for later inspection.
with open('extracted_text.txt', 'w', encoding='utf-8') as f:
    f.write(text_content)

# Part 3: tokenize and count.
print("正在进行分词统计...")
words = []

# Sanity check: make sure we actually captured Chinese text before
# attempting segmentation.
chinese_chars = re.findall(r'[\u4e00-\u9fa5]', text_content)
if not chinese_chars:
    print("警告：未找到中文字符！")
    print("原始内容示例: " + text_content[:200])
    # sys.exit instead of the interactive-only `exit` builtin.
    sys.exit(1)

print(f"中文字符数量: {len(chinese_chars)}")

# Runs of 2+ consecutive Chinese characters are candidate phrases.
matches = re.findall(r'[\u4e00-\u9fa5]{2,}', text_content)
print(f"找到 {len(matches)} 个连续中文字符串")

# Crude n-gram "segmentation": emit every 2-, 3- and 4-character window
# from each run. A 2-character run yields just itself, so no special
# case is needed (the original `len(word) == 2` branch was redundant —
# this loop produces the identical output for it).
for run in matches:
    for i in range(len(run) - 1):
        for n in (2, 3, 4):
            if i + n <= len(run):
                words.append(run[i:i + n])

print(f"总共提取了 {len(words)} 个词语")
print(f"不同词语数量: {len(set(words))}")

# Show a small sample of the distinct words found.
print("词语示例:")
sample = list(set(words))[:10]
for sample_word in sample:
    print(sample_word, end=" ")
print()

# Tally how often each word occurs.
word_counter = Counter(words)

# Print the ten most frequent words.
print("\n词频统计结果（前10个）:")
for term, freq in word_counter.most_common(10):
    print(f"{term}: {freq}次")

# Dump the full frequency table to disk, most frequent first.
with open('word_counts.txt', 'w', encoding='utf-8') as f:
    f.writelines(f"{term}: {freq}次\n"
                 for term, freq in word_counter.most_common())
print("词频统计结果已保存到 word_counts.txt")

# Part 4: visualization.
print("正在生成可视化图表...")
# Take the five most frequent words.
top_words = word_counter.most_common(5)
if not top_words:
    print("没有找到足够的词语进行可视化")
    exit(1)

# Unzip the (word, count) pairs into parallel sequences.
labels, counts = zip(*top_words)

# Draw a bar chart, one pastel color per bar.
plt.figure(figsize=(10, 6))
bar_colors = ['#FF9999', '#66B3FF', '#99FF99', '#FFCC99', '#FF99CC']
plt.bar(labels, counts, color=bar_colors)
plt.xticks(rotation=45)

# Title and axis labels (Chinese text relies on the font configured at
# the top of the file).
plt.title("网页正文中出现频率最高的5个词")
plt.xlabel("词语")
plt.ylabel("出现次数")
plt.tight_layout()

# Annotate each bar with its count, just above the bar top.
for idx, count in enumerate(counts):
    plt.text(idx, count + 0.5, str(count), ha='center')

# Write the chart to disk.
plt.savefig('word_frequency.png')
print("图表已保存为 word_frequency.png")

# Show the figure interactively (disabled for headless runs).
# plt.show() 