import os
import re
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from markdownify import markdownify as md

# Browser-like User-Agent so the docs server does not reject scripted requests.
HEADERS = {'User-Agent': 'Mozilla/5.0'}
# Per-request timeout in seconds passed to requests.get.
TIMEOUT = 10
MAX_DEPTH = 5  # Maximum recursion depth for the crawl.

def create_folder_structure(base_folder, url):
    """Mirror the URL's segments as a nested directory tree under base_folder.

    Each non-empty segment of the URL (scheme marker, host, path parts)
    becomes one directory level.  Characters that are illegal in Windows
    file names are replaced with underscores.

    Args:
        base_folder: Root directory under which the tree is created.
        url: URL whose slash-separated segments define the directory chain.

    Returns:
        The path of the innermost directory (created if missing).
    """
    current_folder = base_folder
    for part in url.strip('/').split('/'):
        # Skip the empty segment produced by the '//' after the scheme.
        if not part:
            continue
        # Replace every character Windows forbids in file names, not just
        # ':' and '?' — URLs may also contain * < > " | and backslashes.
        part = re.sub(r'[<>:"/\\|?*]', '_', part)
        current_folder = os.path.join(current_folder, part)
        os.makedirs(current_folder, exist_ok=True)
    return current_folder

def save_page_as_md(folder, url, html=None):
    """Convert a page's main content to Markdown and save it as index.md.

    Args:
        folder: Directory in which to write index.md.
        url: Source URL; recorded in the file header and fetched when
            html is not supplied.
        html: Optional pre-fetched HTML.  Passing it avoids a second HTTP
            request when the caller has already downloaded the page.

    Errors are caught and printed so one bad page does not stop the crawl.
    """
    try:
        if html is None:
            response = requests.get(url, headers=HEADERS, timeout=TIMEOUT)
            response.raise_for_status()
            html = response.text

        soup = BeautifulSoup(html, 'html.parser')
        # Prefer the <main> element; fall back to the whole document.
        main_content = soup.find('main') or soup
        md_content = md(str(main_content), heading_style='ATX')

        file_path = os.path.join(folder, 'index.md')
        with open(file_path, 'w', encoding='utf-8') as file:
            file.write(f"# Source: {url}\n\n{md_content}")
        print(f"Saved: {file_path}")
    except Exception as e:
        print(f"Error processing {url}: {str(e)}")

def get_valid_links(base_url, html_content):
    """Extract the set of unique in-scope links from a page.

    Resolves every <a href> against base_url, drops URL fragments, and
    keeps only links that fall under base_url's prefix.

    Args:
        base_url: URL used both to resolve relative links and as the
            prefix filter defining the crawl scope.
        html_content: Raw HTML to scan.

    Returns:
        A set of absolute, fragment-free URLs starting with base_url.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    links = set()
    for a_tag in soup.find_all('a', href=True):
        full_url = urljoin(base_url, a_tag['href'])
        # Strip the fragment: '/page' and '/page#section' are the same
        # resource and would otherwise be crawled and saved twice.
        full_url = full_url.split('#', 1)[0].rstrip('/')
        if full_url.startswith(base_url):
            links.add(full_url)
    return links

def crawl_and_save(base_folder, base_url, visited=None, depth=0):
    """Recursively crawl pages under base_url and save each as Markdown.

    Args:
        base_folder: Local root directory mirroring the site structure.
        base_url: Page to fetch on this call.
        visited: Set of already-processed URLs, shared across the
            recursion to prevent revisits and cycles.
        depth: Current recursion depth; crawling stops past MAX_DEPTH.

    Errors are caught and printed so one failing page does not abort
    the whole crawl.
    """
    # `visited or set()` would discard a caller-supplied *empty* set, so
    # the caller's set would never be populated; test for None explicitly.
    if visited is None:
        visited = set()
    if depth > MAX_DEPTH or base_url in visited:
        return
    visited.add(base_url)

    try:
        response = requests.get(base_url, headers=HEADERS, timeout=TIMEOUT)
        response.raise_for_status()

        folder = create_folder_structure(base_folder, base_url)
        save_page_as_md(folder, base_url)

        if depth < MAX_DEPTH:
            # NOTE(review): the filter prefix here is the *current* page,
            # not the original start URL, so each level only follows links
            # nested below the current path. Confirm this narrowing is
            # intended; filtering by the crawl root would visit siblings too.
            links = get_valid_links(base_url, response.text)
            for link in links:
                crawl_and_save(base_folder, link, visited, depth + 1)

    except Exception as e:
        print(f"Error crawling {base_url}: {str(e)}")

def main():
    """Entry point: mirror the Bannerlord modding docs site locally."""
    output_root = r'C:\Users\Msi\Desktop\temp\crawler_test\bannerlord_moddocs'
    start_url = 'https://moddocs.bannerlord.com/'
    crawl_and_save(output_root, start_url)


if __name__ == "__main__":
    main()
