import requests
from bs4 import BeautifulSoup
import json
import time
import os

# Make sure the output directory for fetched pages exists.
os.makedirs('content', exist_ok=True)

# Load the previously-scraped mapping of page titles to URLs.
menu_path = 'scraper/fastapi_learn_menu.json'
with open(menu_path, 'r', encoding='utf-8') as f:
    menu_links = json.load(f)

# Browser-like User-Agent so the site serves regular HTML instead of blocking us.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
        'AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/91.0.4472.124 Safari/537.36'
    )
}

# Fetch each menu page and collect its main text content.
content_dict = {}


def _safe_filename(title):
    """Return *title* with characters that are illegal in filenames replaced by '_'.

    Backward-compatible with the old ``title.replace('/', '_')``: '/' is still
    mapped to '_', but Windows-illegal characters are now handled too.
    """
    return ''.join('_' if ch in '\\/:*?"<>|' else ch for ch in title)


for title, url in list(menu_links.items())[:10]:  # first 10 pages as a sample run
    try:
        print(f"正在获取: {title} - {url}")
        response = requests.get(url, headers=headers, timeout=10)
        # Fail fast on HTTP errors (404/500) instead of saving an error page as content.
        response.raise_for_status()
        response.encoding = response.apparent_encoding

        soup = BeautifulSoup(response.text, 'html.parser')

        # The article body lives in the 'md-content' div (MkDocs-style layout —
        # presumably Material theme; verify against the target site).
        content_div = soup.find('div', class_='md-content')
        if content_div:
            # Strip in-page navigation so it doesn't pollute the extracted text.
            for nav in content_div.find_all('nav'):
                nav.decompose()

            # Keep whitespace (strip=False) to preserve the page's text layout.
            content_text = content_div.get_text(strip=False)
            content_dict[title] = {
                'url': url,
                'content': content_text
            }

            # Also save each page as its own text file.
            filename = f"content/{_safe_filename(title)}.txt"
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(f"Title: {title}\n")
                f.write(f"URL: {url}\n\n")
                f.write(content_text)

        # Throttle requests to be polite to the server.
        time.sleep(0.5)

    except Exception as e:  # best-effort scrape: log the failure and continue
        print(f"获取 {title} 失败: {e}")

# Persist the whole collection as a single JSON index.
with open('content/content_dict.json', 'w', encoding='utf-8') as f:
    json.dump(content_dict, f, ensure_ascii=False, indent=2)

print("内容获取完成")