import requests
from bs4 import BeautifulSoup
import time
import os
from urllib.parse import urljoin

# Configuration: crawl target, output location, and politeness delay.
BASE_URL = 'https://www.runoob.com'
START_URL = f'{BASE_URL}/python3/python3-examples.html'
OUTPUT_FILE = 'python3_examples.md'
DELAY = 1  # Seconds to wait between requests, to avoid getting IP-banned.

# HTTP request headers: a browser-like User-Agent so the site serves
# normal pages, and an Accept-Language preferring Chinese content.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
}

def get_page(url):
    """Fetch *url* over HTTP and return its body as decoded text.

    Args:
        url: Absolute URL to request.

    Returns:
        The response text on success, or None on any request failure
        (connection error, timeout, or non-2xx status).
    """
    try:
        print(f"正在请求: {url}")
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        # If the server omits a charset in Content-Type, requests falls
        # back to ISO-8859-1, which garbles Chinese text. Use the
        # content-sniffed encoding so .text decodes correctly.
        response.encoding = response.apparent_encoding
        return response.text
    except requests.exceptions.RequestException as e:
        print(f"请求失败: {e}")
        return None

def extract_links(html_content):
    """Collect all hyperlinks found inside the 'article-intro' element.

    Args:
        html_content: Raw HTML of the index page, or None/empty.

    Returns:
        A list of dicts with 'title' (link text) and 'url' (absolute URL)
        keys; an empty list when the input or the container is missing.
    """
    if not html_content:
        return []

    container = BeautifulSoup(html_content, 'html.parser').find(class_='article-intro')
    if container is None:
        print("未找到'article-intro'元素")
        return []

    # Keep only anchors that actually carry an href; resolve each one
    # against BASE_URL so relative links become absolute.
    links = [
        {'title': anchor.get_text(strip=True),
         'url': urljoin(BASE_URL, anchor.get('href'))}
        for anchor in container.find_all('a')
        if anchor.get('href')
    ]

    print(f"共提取到 {len(links)} 个链接")
    return links

def extract_code(html_content):
    """Extract the example source code from the 'example_code' element.

    Args:
        html_content: Raw HTML of an example page, or None/empty.

    Returns:
        The code text (whitespace preserved), or None when the input or
        the 'example_code' element is missing.
    """
    if not html_content:
        return None

    block = BeautifulSoup(html_content, 'html.parser').find(class_='example_code')
    if block is None:
        print("未找到'example_code'元素")
        return None

    # Prefer the <pre> child (formatted code); otherwise fall back to
    # the element's full text content.
    pre = block.find('pre')
    source = pre if pre else block
    return source.get_text(strip=False)

def generate_markdown(links, output_file):
    """Crawl every link and write the extracted code examples to Markdown.

    For each link, fetches the page, pulls out its example code, and
    appends a titled, fenced code section to *output_file*. Pages that
    fail to download or contain no code are skipped. Finishes with a
    statistics section, then reports the result on stdout.

    Args:
        links: List of {'title': ..., 'url': ...} dicts to process.
        output_file: Path of the Markdown file to create (overwritten).
    """
    total = len(links)
    success = 0

    with open(output_file, 'w', encoding='utf-8') as f:
        # Document header.
        f.write(f"# Python3 示例代码汇总\n\n来源：{START_URL}\n\n---\n\n")

        for index, item in enumerate(links, 1):
            title = item['title']
            url = item['url']

            print(f"\n[{index}/{total}] 正在处理: {title}")

            page_html = get_page(url)
            if not page_html:
                print(f"跳过: {title}")
                continue

            code = extract_code(page_html)
            if not code:
                print(f"未找到代码，跳过: {title}")
                continue

            # One self-contained Markdown section per example.
            f.write(
                f"## {title}\n\n"
                f"来源：[{url}]({url})\n\n"
                "```python\n"
                f"{code}\n"
                "```\n\n"
                "---\n\n"
            )

            success += 1
            print(f"成功抓取并写入: {title}")

            # Throttle to avoid hammering the server.
            time.sleep(DELAY)

        f.write(
            f"\n\n### 统计信息\n"
            f"- 总共尝试: {total} 个页面\n"
            f"- 成功抓取: {success} 个代码示例\n"
            f"- 生成时间: {time.strftime('%Y-%m-%d %H:%M:%S')}\n"
        )

    print(f"\nMarkdown文件已生成: {os.path.abspath(output_file)}")
    print(f"共成功抓取 {success} 个代码示例")

def main():
    """Entry point: fetch the index page, gather links, build the file.

    Exits early (with a console message) if the index page cannot be
    downloaded or yields no links.
    """
    print(f"开始爬取Python3示例代码...")

    # Step 1: download the examples index page.
    index_html = get_page(START_URL)
    if not index_html:
        print("无法获取起始页面，程序退出")
        return

    # Step 2: pull every example link out of it.
    links = extract_links(index_html)
    if not links:
        print("未找到有效链接，程序退出")
        return

    # Step 3: visit each link, extract the code, and emit Markdown.
    generate_markdown(links, OUTPUT_FILE)

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()