# 诗文爬取脚本
import requests
from bs4 import BeautifulSoup
import json
import csv
import time

# Target URL: Tang poetry index on gushiwen.cn
url = 'https://www.gushiwen.cn/gushi/tangshi.aspx'

# Browser-like User-Agent so the site does not reject the request
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

# Fetch the page; fail fast on network errors or non-2xx responses.
# Previously a timeout/connection error crashed with a raw traceback, and an
# HTTP error page (404/500) would have been parsed as if it were real content.
try:
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
except requests.RequestException as e:
    print(f'无法获取网页内容: {e}')
    raise SystemExit(1)

response.encoding = 'utf-8'
html_content = response.text

# Guard against an empty response body
if not html_content:
    print('无法获取网页内容')
    raise SystemExit(1)

# Parse the HTML
print('开始解析网页内容...')
soup = BeautifulSoup(html_content, 'html.parser')

# Print a structure preview to help debugging selector choices
print('网页结构预览:')
print(soup.prettify()[:2000])  # first 2000 characters only

# Extracted records: {'type': anchor text, 'href': anchor URL}
poems = []

# Try selectors from most specific to least specific.
poem_containers = soup.select('.right .sons .cont')

if not poem_containers:
    # Fallback: the same container class without the surrounding layout
    poem_containers = soup.select('.cont')

if not poem_containers:
    # Last resort: scan the whole document once.  The previous fallback
    # (find_all('div')) returned every nested <div>, so each <a> was
    # collected once per enclosing div — massive duplication.
    poem_containers = [soup]

print(f'找到 {len(poem_containers)} 个诗歌容器')

seen = set()  # dedupe anchors that appear in nested containers
for container in poem_containers:
    for anchor in container.find_all('a'):
        # strip=True normalizes the surrounding whitespace the site embeds
        title = anchor.get_text(strip=True)
        href = anchor.get('href')

        # Skip anchors that carry neither text nor a link
        if not (title or href):
            continue

        key = (title, href)
        if key in seen:
            continue
        seen.add(key)

        poems.append({'type': title, 'href': href})

print(f'成功解析 {len(poems)} 首诗歌')

# Persist the scraped records as JSON.
# (A CSV export existed here as commented-out dead code; re-add it with
# csv.DictWriter on fieldnames ['type', 'href'] if a spreadsheet is needed.)
json_file = 'tangshi_poems.json'
with open(json_file, 'w', encoding='utf-8') as f:
    # ensure_ascii=False keeps the Chinese text readable in the output file
    json.dump(poems, f, ensure_ascii=False, indent=4)

# Single summary message (two near-identical prints were collapsed into one)
print(f'成功爬取 {len(poems)} 首诗句，已保存到 {json_file} 文件中。')

# Note: install dependencies before running:
# pip install requests beautifulsoup4