import asyncio
from playwright.async_api import async_playwright
import time

class NovelParser:
    """Playwright-based scraper for web-novel chapter pages.

    Given a chapter URL it extracts the chapter title, the body text
    (with common ad boilerplate stripped) and the previous/next chapter
    links, and can preload a run of consecutive chapters.
    """

    def __init__(self, headless=True, baseurl=''):
        """
        :param headless: launch Chromium without a visible window
        :param baseurl: site root (scheme + host) used to resolve
                        root-relative links such as '/80/1063.html';
                        trailing slashes are stripped
        """
        self.headless = headless
        # Playwright driver handle; kept so close_browser() can stop it
        # (the original code leaked this — start() was never matched by stop()).
        self.playwright = None
        self.browser = None
        self.page = None
        self.current_url = None
        self.baseurl = baseurl.rstrip('/')

    async def init_browser(self):
        """Start Playwright, launch Chromium and open a page.

        On any failure all handles are released and reset to None, so
        callers can detect failure by checking ``self.browser``/``self.page``.
        """
        self.browser = None
        self.page = None
        try:
            print("开始初始化playwright...")
            self.playwright = await async_playwright().start()
            print("playwright初始化成功")

            print("开始启动chromium浏览器...")
            self.browser = await self.playwright.chromium.launch(
                headless=self.headless,
                args=[
                    '--disable-gpu',
                    '--disable-dev-shm-usage',
                    '--disable-setuid-sandbox',
                    '--no-sandbox'
                ]
            )

            if not self.browser:
                print("浏览器启动失败，未创建浏览器实例")
                return
            print("浏览器启动成功")

            print("开始创建新页面...")
            self.page = await self.browser.new_page()

            if not self.page:
                print("创建页面失败，page为None")
                return
            print("页面创建成功")

            # Explicit per-operation timeout so a stuck page cannot hang
            # forever. (An earlier revision skipped this because the call was
            # made on a None page; the guard above makes it safe now.)
            self.page.set_default_timeout(30000)
            print("浏览器初始化完成")
        except Exception as e:
            print(f"初始化浏览器时出错: {e}")
            # Release whatever was started before the failure so the
            # Playwright driver process does not linger.
            await self.close_browser()

    async def close_browser(self):
        """Close the browser and stop the Playwright driver (best-effort)."""
        if self.browser:
            try:
                await self.browser.close()
            except Exception as e:
                print(f"关闭浏览器时出错: {e}")
        # Fix: also stop the driver; the original version leaked it.
        if self.playwright:
            try:
                await self.playwright.stop()
            except Exception as e:
                print(f"关闭浏览器时出错: {e}")
        self.playwright = None
        self.browser = None
        self.page = None

    def _error_result(self, url, message):
        """Build the uniform failure payload returned by parse_novel_chapter."""
        return {
            'title': '',
            'content': '',
            'prev_url': '',
            'next_url': '',
            'current_url': url,
            'error': message
        }

    async def parse_novel_chapter(self, url):
        """
        Parse one chapter page.

        :param url: chapter URL to fetch
        :return: dict with keys title / content / prev_url / next_url /
                 current_url, plus 'error' when anything failed
        """
        if not self.browser or not self.page:
            await self.init_browser()

        # Re-check: init_browser resets handles to None on failure.
        if not self.browser:
            print(f"浏览器未初始化成功，无法解析章节: {url}")
            return self._error_result(url, '浏览器未初始化成功')

        if not self.page:
            print(f"页面未初始化成功，无法解析章节: {url}")
            return self._error_result(url, '页面未初始化成功')

        self.current_url = url
        try:
            await self.page.goto(url, wait_until='domcontentloaded')
            await self.page.wait_for_timeout(1000)  # allow late content to render

            # NOTE: the selectors used below are generic guesses; adjust
            # them for the target site if extraction comes back empty.
            content = await self._extract_content()
            prev_chapter_url = await self._extract_prev_chapter()
            next_chapter_url = await self._extract_next_chapter()
            chapter_title = await self._extract_title()

            return {
                'title': chapter_title,
                'content': content,
                'prev_url': prev_chapter_url,
                'next_url': next_chapter_url,
                'current_url': url
            }
        except Exception as e:
            print(f"解析章节失败: {e}")
            return self._error_result(url, str(e))

    async def preload_next_chapters(self, start_url, num_chapters=5):
        """
        Preload up to ``num_chapters`` consecutive chapters by following
        each chapter's next_url.

        :param start_url: first chapter URL
        :param num_chapters: maximum number of chapters to fetch
        :return: list of chapter dicts (see parse_novel_chapter)
        """
        preloaded_chapters = []
        current_url = start_url

        for i in range(num_chapters):
            print(f"预加载第 {i+1} 章...")
            chapter_data = await self.parse_novel_chapter(current_url)
            preloaded_chapters.append(chapter_data)

            # Stop when the chain ends or a chapter failed to parse.
            if not chapter_data.get('next_url') or chapter_data.get('error'):
                break

            current_url = chapter_data['next_url']
            await asyncio.sleep(1)  # throttle to avoid hammering the site

        return preloaded_chapters

    async def _extract_content(self):
        """Return the cleaned chapter body text, or '' if nothing matched."""
        # Common containers used by novel sites, tried in order.
        selectors = [
            'div.content',
            'div#content',
            'div.novel-content',
            'div.chapter-content',
            'article',
            'div.text'
        ]

        for selector in selectors:
            elements = await self.page.query_selector_all(selector)
            if elements:
                # Clean each matched element and join them.
                paragraphs = []
                for element in elements:
                    text = await element.inner_text()
                    cleaned_text = self._clean_content(text)
                    paragraphs.append(cleaned_text)
                return '\n'.join(paragraphs)

        return ''

    async def _extract_title(self):
        """Return the chapter title, or a placeholder if none was found."""
        selectors = [
            'h1',
            'h2',
            'div.title',
            'div.chapter-title',
            'h1.chapter-title'
        ]

        for selector in selectors:
            element = await self.page.query_selector(selector)
            if element:
                return await element.inner_text()

        return '未知章节'

    async def _extract_nav_link(self, selectors):
        """Return the href of the first matching selector, or ''.

        Root-relative paths (leading '/') are resolved against
        ``self.baseurl`` when one was provided. Shared by the prev/next
        extractors, which were previously copy-pasted duplicates.
        """
        for selector in selectors:
            element = await self.page.query_selector(selector)
            if element:
                url = await element.get_attribute('href')
                if url and url.startswith('/') and self.baseurl:
                    return f'{self.baseurl}{url}'
                return url

        return ''

    async def _extract_prev_chapter(self):
        """Return the previous-chapter URL, or '' if none was found."""
        return await self._extract_nav_link([
            'a:has-text("上一章")',
            'a.prev',
            'a[href*="prev"]',
            'a[title*="上一章"]'
        ])

    async def _extract_next_chapter(self):
        """Return the next-chapter URL, or '' if none was found."""
        return await self._extract_nav_link([
            'a:has-text("下一章")',
            'a.next',
            'a[href*="next"]',
            'a[title*="下一章"]'
        ])

    def _clean_content(self, text):
        """Strip known ad phrases and blank lines from chapter text."""
        # Boilerplate commonly injected into pirated/aggregated chapters.
        ads = [
            '请收藏本站',
            '最新网址',
            '手机阅读',
            'APP下载',
            '本章未完，点击下一页继续阅读',
            '加入书架',
            '广告',
            '推荐阅读'
        ]

        for ad in ads:
            text = text.replace(ad, '')

        # Collapse the result: keep only non-empty, stripped lines.
        lines = [line.strip() for line in text.split('\n') if line.strip()]
        return '\n'.join(lines)

# 使用示例
async def main():
    """Demo driver: parse one chapter, then preload the next five."""
    # Derive baseurl (scheme + host only) from the test URL, so that
    # root-relative prev/next links can be resolved.
    test_url = 'https://www.beqege.cc/80/1062.html'
    from urllib.parse import urlparse
    parsed_url = urlparse(test_url)
    baseurl = f'{parsed_url.scheme}://{parsed_url.netloc}'
    parser = NovelParser(headless=True, baseurl=baseurl)  # headless: no visible window
    try:
        # Parse a single chapter. Fix: reuse test_url instead of
        # re-hard-coding the same URL a second time.
        chapter_url = test_url
        chapter_data = await parser.parse_novel_chapter(chapter_url)
        print(f"章节标题: {chapter_data['title']}")
        print(f"章节内容: {chapter_data['content'][:200]}...")  # first 200 chars only
        print(f"上一章地址: {chapter_data['prev_url']}")
        print(f"下一章地址: {chapter_data['next_url']}")

        # Preload the following 5 chapters, if a next link exists.
        if chapter_data['next_url']:
            print("\n开始预加载下5章...")
            preloaded_chapters = await parser.preload_next_chapters(chapter_data['next_url'], 5)
            print(f"成功预加载 {len(preloaded_chapters)} 章")
            for i, chapter in enumerate(preloaded_chapters):
                print(f"第 {i+1} 章: {chapter['title']}")
    finally:
        await parser.close_browser()

if __name__ == '__main__':
    # Friendly hint when playwright is missing.
    # NOTE(review): this guard is likely unreachable — the module-level
    # `from playwright.async_api import ...` at the top of the file would
    # already have raised ImportError before execution reaches here.
    try:
        import playwright
    except ImportError:
        print("未检测到playwright库，请先安装:")
        print("pip install playwright")
        print("playwright install")
        # Fix: exit() is the interactive site-module helper; raising
        # SystemExit is the portable way for a program to set exit status.
        raise SystemExit(1)

    # Run the demo entry point.
    asyncio.run(main())