import os
import re
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup


class SiteCrawler:
    """Breadth-limited, same-domain web crawler that saves each page to disk.

    Starting from ``base_url``, pages up to ``max_depth`` link-hops away are
    fetched by a small thread pool; only http(s) links on the same domain are
    followed. Fetched HTML is written under ``TempData/crawled_<domain>/``.
    """

    def __init__(self, base_url, max_depth=3, max_workers=5):
        """Set up crawl state and create the output directory.

        :param base_url: starting URL; its netloc defines the allowed domain
        :param max_depth: maximum link distance from ``base_url`` to follow
        :param max_workers: size of the fetch thread pool
        """
        self.base_url = base_url
        self.domain = urlparse(base_url).netloc
        self.visited = set()   # URLs already claimed by a worker
        self.to_visit = set()  # pending (url, depth) work items
        self.max_depth = max_depth
        self.max_workers = max_workers
        # visited/to_visit are shared across worker threads; all access to
        # them goes through this lock (plain set ops are not a safe
        # check-then-act across threads).
        self._lock = threading.Lock()
        self.output_dir = f"TempData/crawled_{self.domain}"
        os.makedirs(self.output_dir, exist_ok=True)

    def is_valid_url(self, url):
        """Return True if *url* is an http(s) URL on the target domain."""
        parsed = urlparse(url)
        return parsed.netloc == self.domain and parsed.scheme in ['http', 'https']

    def get_page(self, url):
        """Fetch *url* and return its body text, or None on any failure.

        Failures are deliberately best-effort: the error is printed and the
        crawl continues with the remaining URLs.
        """
        try:
            headers = {'User-Agent': 'Mozilla/5.0'}
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            return response.text
        except Exception as e:
            print(f"获取 {url} 失败: {str(e)}")
            return None

    def extract_links(self, html, current_url):
        """Return the set of same-domain links found in *html*.

        Relative hrefs are resolved against *current_url*; fragments are
        stripped so ``page#a`` and ``page#b`` dedupe to one URL.
        """
        soup = BeautifulSoup(html, 'html.parser')
        links = set()

        for a in soup.find_all('a', href=True):
            # Resolve relative and absolute hrefs against the current page.
            full_url = urljoin(current_url, a['href'])
            # Drop the anchor fragment before deduplication.
            full_url = full_url.split('#')[0]
            if self.is_valid_url(full_url):
                links.add(full_url)

        return links

    def save_content(self, url, html):
        """Write *html* to a file in ``output_dir`` derived from *url*.

        The URL is reduced to filesystem-safe characters — the previous
        ``replace('/', '_')`` left ``:``/``?``/``=`` in the name, which is
        invalid on Windows and collision-prone — and truncated so the name
        stays within common filesystem limits.
        """
        safe = re.sub(r'[^A-Za-z0-9._-]+', '_', url).strip('_') or 'index'
        filepath = os.path.join(self.output_dir, safe[:200] + '.html')

        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(html)

    def crawl_page(self, url, depth):
        """Fetch one page, save it, and queue its unseen same-domain links."""
        if depth > self.max_depth:
            return

        # Claim the URL atomically so two workers never crawl it twice
        # (the unlocked check-then-add was a race).
        with self._lock:
            if url in self.visited:
                return
            self.visited.add(url)

        print(f"正在爬取: {url} (深度: {depth})")

        html = self.get_page(url)
        if not html:
            return

        self.save_content(url, html)

        # Queue newly discovered links for the dispatcher loop.
        new_links = self.extract_links(html, url)
        with self._lock:
            for link in new_links:
                # BUGFIX: the original tested `link not in self.to_visit`,
                # but to_visit holds (url, depth) tuples, so that membership
                # test compared a string to tuples and never matched.
                # Checking `visited` here (plus the claim above) is the
                # correct dedupe guard.
                if link not in self.visited:
                    self.to_visit.add((link, depth + 1))

    def start_crawling(self):
        """Crawl from ``base_url`` until the queue drains and workers finish.

        The previous loop exited as soon as ``to_visit`` was momentarily
        empty, even while in-flight workers were about to add links; now we
        only stop once the queue is empty AND no submitted task is pending.
        """
        self.to_visit.add((self.base_url, 0))

        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            pending = []
            while True:
                with self._lock:
                    item = self.to_visit.pop() if self.to_visit else None

                if item is None:
                    # Queue is empty; in-flight workers may still add links.
                    pending = [f for f in pending if not f.done()]
                    if not pending:
                        break
                    time.sleep(0.1)
                    continue

                pending.append(executor.submit(self.crawl_page, *item))
                time.sleep(1)  # politeness delay between requests


if __name__ == "__main__":
    target_url = "http://leuven-instruments.cn/"  # 替换为目标网站
    crawler = SiteCrawler(target_url, max_depth=3)
    crawler.start_crawling()
    print(f"爬取完成！共爬取 {len(crawler.visited)} 个页面")
