import os
import time
from playwright.sync_api import sync_playwright
from urllib.parse import urljoin, urlparse
import yaml
import asyncio

class HuaweiDocCrawler:
    """Crawl Huawei developer-documentation pages with Playwright.

    Settings come from ``config.yml`` (top-level key ``crawler``). Each
    crawled page's text is saved as a Markdown file in ``output_dir``, and
    the set of visited URLs is persisted to ``visited_urls.txt`` so an
    interrupted crawl can resume without re-fetching pages.
    """

    # Chromium flags suitable for containerized / headless environments.
    _BROWSER_ARGS = ['--disable-gpu', '--no-sandbox', '--disable-dev-shm-usage']
    _USER_AGENT = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
        'AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/91.0.4472.124 Safari/537.36'
    )

    def __init__(self):
        # Load crawler settings from the YAML config file.
        with open('config.yml', 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)['crawler']

        self.base_url = self.config['base_url']
        self.start_url = self.config['start_url']
        self.output_dir = self.config['output_dir']
        self.delay = self.config['delay']
        self.max_depth = self.config['max_depth']
        self.max_pages = self.config['max_pages']

        # BUGFIX: assign urls_file BEFORE load_visited_urls() needs it
        # (the original computed the path twice to work around the ordering).
        self.urls_file = os.path.join(self.output_dir, 'visited_urls.txt')
        self.visited_urls = self.load_visited_urls()
        self.page_count = 0

        # Start Playwright and open the first browser + context.
        self.playwright = sync_playwright().start()
        self._launch_browser()

    def _launch_browser(self):
        """Launch a fresh headless Chromium browser and browsing context."""
        self.browser = self.playwright.chromium.launch(
            headless=True,
            args=self._BROWSER_ARGS,
        )
        self.context = self.browser.new_context(
            viewport={'width': 1920, 'height': 1080},
            user_agent=self._USER_AGENT,
        )

    def close(self):
        """Release browser resources. Safe to call multiple times."""
        # Each step is attempted independently so a failure in one does not
        # prevent the others from running (e.g. partially-built instance).
        for shutdown in (
            lambda: self.context.close(),
            lambda: self.browser.close(),
            lambda: self.playwright.stop(),
        ):
            try:
                shutdown()
            except Exception:
                pass

    def __del__(self):
        # Best-effort cleanup if the caller forgot to close() explicitly.
        self.close()

    def create_output_dir(self):
        """Ensure the output directory exists."""
        os.makedirs(self.output_dir, exist_ok=True)

    def load_visited_urls(self):
        """Return the set of already-crawled base URLs persisted on disk.

        Returns an empty set when the file is missing or unreadable, so a
        fresh crawl starts cleanly.
        """
        visited = set()
        try:
            if os.path.exists(self.urls_file):
                with open(self.urls_file, 'r', encoding='utf-8') as f:
                    visited = {line.strip() for line in f if line.strip()}
                print(f"加载了 {len(visited)} 个已访问的URL")
        except Exception as e:
            print(f"加载已访问URL失败: {str(e)}")
        return visited

    def save_visited_urls(self):
        """Persist the visited-URL set, one URL per line."""
        try:
            with open(self.urls_file, 'w', encoding='utf-8') as f:
                for url in self.visited_urls:
                    f.write(f"{url}\n")
            print(f"保存了 {len(self.visited_urls)} 个已访问的URL")
        except Exception as e:
            print(f"保存已访问URL失败: {str(e)}")

    def is_valid_url(self, url):
        """Return True if *url* is a Huawei doc-center page worth crawling.

        A valid URL is http(s), hosted on developer.huawei.com, contains
        '/doc/', carries no 'istab=' parameter, and its query-stripped path
        starts with the configured base URL's path.
        """
        if not url or not url.startswith('http'):
            return False

        # BUGFIX (idiom): `!=` instead of `not ... ==`.
        if urlparse(url).netloc != 'developer.huawei.com':
            return False

        if '/doc/' not in url:
            return False

        # Tabbed variants of a page duplicate its content; skip them.
        if 'istab=' in url:
            return False

        # NOTE: the original also rebuilt a normalized URL keeping only the
        # catalogVersion parameter, but the result was discarded (dead code)
        # — the method only ever returned True/False. Removed.
        base_path = url.split('?')[0]
        target_base = self.base_url.split('?')[0]
        return base_path.startswith(target_base)

    def save_content(self, title, content, url):
        """Write *content* to '<output_dir>/<sanitized title>.md'.

        The source URL is appended at the end of the file for traceability.
        """
        safe_title = "".join(
            c for c in title if c.isalnum() or c in (' ', '-', '_')
        ).strip()
        # BUGFIX: a title that sanitizes to nothing would yield '.md';
        # fall back to a per-crawl sequential name instead.
        if not safe_title:
            safe_title = f"page_{self.page_count + 1}"
        filename = os.path.join(self.output_dir, f"{safe_title}.md")
        content_with_url = f"{content}\n\n本页URL：{url}"
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(content_with_url)

    def _wait_for_title(self, page):
        """Wait until the page title leaves the '文档中心' placeholder.

        The doc center is a SPA: the real title appears only after client-side
        rendering finishes. Returns whatever title is available after waiting.
        """
        max_wait_time = 20  # seconds
        start_time = time.time()
        while time.time() - start_time < max_wait_time:
            title = page.title()
            if title and title != "文档中心":
                break
            time.sleep(1)

        # Give the document body a chance to render as well.
        try:
            page.wait_for_selector('.document-content', timeout=10000)
            # Extra second to let the content settle completely.
            time.sleep(1)
        except Exception:
            time.sleep(5)

        title = page.title()
        if title == "文档中心":
            print("警告：页面可能未完全加载，标题仍为'文档中心'")
            # One last wait before giving up on a better title.
            time.sleep(5)
            title = page.title()

        print(f"页面标题: {title}")
        return title

    def _extract_content(self, page):
        """Return the text of '.document-content', retrying up to 3 times.

        Raises the last exception if every attempt fails; may return an
        empty string when the element stays empty.
        """
        content = ""
        max_retries = 3
        for retry in range(max_retries):
            try:
                content = page.locator('.document-content').text_content()
                if content.strip():
                    print("成功获取页面内容")
                    break
                print(f"内容为空，重试 {retry + 1}/{max_retries}")
                time.sleep(2)
            except Exception:
                if retry == max_retries - 1:
                    raise
                print(f"获取内容失败，重试 {retry + 1}/{max_retries}")
                time.sleep(2)
        return content

    def _collect_links(self, page, current_url):
        """Return new, valid absolute doc URLs linked from *page*."""
        valid_links = []
        for link in page.query_selector_all('a[href]'):
            try:
                href = link.get_attribute('href')
                if href:
                    next_url = urljoin(current_url, href)
                    if (self.is_valid_url(next_url)
                            and next_url.split('?')[0] not in self.visited_urls):
                        valid_links.append(next_url)
            except Exception:
                continue
        return valid_links

    def crawl_page(self, url, depth=0):
        """Crawl *url*, save its content, then recurse into its doc links.

        Skips URLs already visited (compared with the query string stripped)
        and stops at max_depth / max_pages. Errors are logged, progress is
        flushed to disk, and the crawl continues with the next URL.
        """
        base_url = url.split('?')[0]

        if base_url in self.visited_urls:
            print(f"跳过已访问的URL: {url}")
            return

        if depth > self.max_depth:
            print(f"达到最大深度 {self.max_depth}，停止爬取: {url}")
            return

        if self.page_count >= self.max_pages:
            print(f"达到最大页面数 {self.max_pages}，停止爬取")
            return

        try:
            print(f"\n开始爬取第 {self.page_count + 1} 个页面")
            print(f"URL: {url}")

            # Recycle the browser every 100 pages to release memory.
            if self.page_count > 0 and self.page_count % 100 == 0:
                print("重新创建浏览器上下文以释放内存...")
                self.context.close()
                self.browser.close()
                self._launch_browser()

            page = self.context.new_page()
            valid_links = []  # collected before the page is closed

            try:
                page.set_default_timeout(60000)
                page.goto(url, wait_until='domcontentloaded')

                title = self._wait_for_title(page)
                content = self._extract_content(page)

                # Refuse to save pages whose content never materialized.
                if not content.strip():
                    raise Exception("未能获取到有效内容")

                self.save_content(title, content, url)
                self.visited_urls.add(base_url)
                self.page_count += 1
                print(f"成功保存第 {self.page_count} 个页面")

                valid_links = self._collect_links(page, url)
                print(f"找到 {len(valid_links)} 个新的有效链接")

            finally:
                # Always close the tab, even on errors, to cap open pages.
                page.close()

            # Recurse only after the page is closed.
            for next_url in valid_links:
                if self.page_count >= self.max_pages:
                    print("已达到最大页面数限制")
                    return

                time.sleep(self.delay)
                self.crawl_page(next_url, depth + 1)

            # Persist progress every 10 pages.
            if self.page_count % 10 == 0:
                self.save_visited_urls()

        except Exception as e:
            print(f"爬取页面时出错: {str(e)}")
            self.save_visited_urls()

    def run(self):
        """Entry point: crawl from start_url, persisting progress on exit."""
        try:
            self.create_output_dir()
            print("开始爬取，配置信息：")
            print(f"基础URL: {self.base_url}")
            print(f"起始URL: {self.start_url}")
            print(f"最大深度: {self.max_depth}")
            print(f"最大页面数: {self.max_pages}")
            print(f"延迟时间: {self.delay}秒")
            print(f"已有访问记录: {len(self.visited_urls)} 个URL")

            self.crawl_page(self.start_url)

            print(f"\n爬取完成统计：")
            print(f"共爬取 {self.page_count} 个页面")
            print(f"共访问 {len(self.visited_urls)} 个URL")

            self.save_visited_urls()

        except KeyboardInterrupt:
            print("\n用户中断爬取")
            self.save_visited_urls()
        except Exception as e:
            print(f"爬取过程出错: {str(e)}")
            self.save_visited_urls()
        finally:
            # BUGFIX: call close() instead of invoking __del__() directly.
            self.close()

if __name__ == "__main__":
    # Script entry point: build the crawler and start the crawl.
    HuaweiDocCrawler().run()