import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import os
import time

def crawl_next_pages(start_url):
    """Crawl pages starting at *start_url*, following "下一页" (next page)
    links and saving each page's HTML into the current directory.

    The first page is saved as ``index.html``; later pages are named after
    the last component of their own URL path, falling back to
    ``page_<n>.html`` when the path has no usable basename (e.g. ends
    with ``/``).

    Parameters:
        start_url (str): initial page URL to start crawling from.

    References:
        爬虫基础实现与urljoin [[1]]
        HTML解析技巧 [[2]]
    """
    current_url = start_url
    page_count = 1

    while True:
        try:
            # Fetch the current page with up to 3 attempts; re-raise the
            # last RequestException so the outer handler stops the crawl.
            response = None
            for attempt in range(3):
                try:
                    response = requests.get(
                        current_url,
                        timeout=10,
                        headers={'User-Agent': 'WebCrawler/1.0'},
                    )
                    # Force UTF-8 decoding of the body regardless of the
                    # server-declared charset.
                    response.encoding = 'utf-8'
                    response.raise_for_status()
                    break
                except requests.RequestException:
                    print(f"请求 {current_url} 失败，第{attempt+1}次重试...")
                    if attempt == 2:
                        raise

            soup = BeautifulSoup(response.text, 'html.parser')

            # Derive the filename from the CURRENT page's URL.  (The
            # original code named pages after the *next* page's URL,
            # shifting every filename by one, and never saved the final
            # page because it broke out before writing.)
            if page_count == 1:
                filename = "index.html"
            else:
                # urlparse(...).path already excludes query and fragment,
                # so no extra '?'/'#' stripping is needed.
                path = urlparse(current_url).path
                filename = os.path.basename(path)
                if not filename:
                    # Path ended with '/' — no basename to use.
                    filename = f"page_{page_count}.html"

            # Save the current page BEFORE deciding whether to continue,
            # so the last page is not lost.
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(response.text)
            print(f"\r已保存第{page_count}页: {filename} : {current_url}", end='', flush=True)

            # Single lookup of the "next page" anchor (the original
            # searched the tree twice per iteration).
            next_link = soup.find(lambda tag:
                tag.name == 'a' and
                tag.get_text(strip=True) == '下一页'
            )
            # Also guard against an anchor with no href attribute, which
            # would otherwise raise KeyError.
            if not next_link or not next_link.get('href'):
                print("\n已到达末页")
                break

            current_url = urljoin(current_url, next_link['href'])
            page_count += 1
            time.sleep(1)  # polite delay between requests

        except requests.RequestException as e:
            print(f"\n请求失败超过最大重试次数: {str(e)}")
            time.sleep(10)
            print("停止爬取")
            break
        except Exception as e:
            print(f"\n处理失败: {str(e)}")
            time.sleep(10)
            break

if __name__ == "__main__":
    # Entry point: begin crawling at the PostgreSQL Chinese docs index.
    crawl_next_pages("http://www.postgres.cn/docs/current/")