from typing import Optional, List, Tuple
import asyncio

from base.base_crawler import AbstractCrawler
from constant.model import NovelsInfo
from database.store_csv import BQGStore, BQGMongoStore
from config.settings import Settings
from database.store_csv import BQGSqliteStore, BQGMySQLStore
import logging

from playwright.async_api import (
    BrowserContext,
    BrowserType,
    Page,
    Playwright,
    async_playwright,
    Locator
)

# Configure logging
logger = logging.getLogger(__name__)


class BqgCrawler(AbstractCrawler):
    """Base Playwright crawler for the bqg novel site.

    Subclasses implement ``search()``, which :meth:`start` invokes once the
    index page has finished loading.
    """

    context_page: Page
    browser_context: BrowserContext

    def __init__(self, index_url: Optional[str] = None) -> None:
        """
        Args:
            index_url: Entry URL the crawler opens first; empty string when None.
        """
        self.index_url = index_url or ""
        self.user_agent = Settings.USER_AGENT
        self.logger = logger

    async def start(self) -> None:
        """Launch a headless Chromium context, open the index page, run search()."""
        async with async_playwright() as p:
            chromium = p.chromium
            # Bug fix: the configured user agent was previously never passed to
            # launch_browser, so Settings.USER_AGENT was silently ignored.
            self.browser_context = await self.launch_browser(
                chromium, user_agent=self.user_agent, headless=True
            )
            # stealth.min.js is a js script to prevent the website from
            # detecting the crawler.
            await self.browser_context.add_init_script(path="libs/stealth.min.js")
            self.context_page = await self.browser_context.new_page()
            await self.context_page.goto(self.index_url)
            await self.context_page.wait_for_load_state()
            await self.search()

    async def launch_browser(
            self,
            chromium: BrowserType,
            user_agent: Optional[str] = None,
            headless: bool = True
            ) -> BrowserContext:
        """Create a persistent browser context.

        Args:
            chromium: Playwright browser type to launch.
            user_agent: Custom UA string; Playwright's default when None.
            headless: Run without a visible browser window.

        Returns:
            The launched persistent BrowserContext.
        """
        browser_context = await chromium.launch_persistent_context(
            user_data_dir=Settings.USER_DATA_DIR,
            headless=headless,
            user_agent=user_agent,
            ignore_https_errors=True
        )
        return browser_context

class NovelsCrawler(BqgCrawler):
    """Crawls the site-map pages and persists the list of novels found."""

    context_page: Page
    browser_context: BrowserContext

    def __init__(self):
        super().__init__(Settings.MAP_URL)
        # Destination store for the scraped novel list.
        self.store = BQGMySQLStore()

    async def search(self):
        """Walk every map page, extract (name, url, type) per novel and store it.

        Loop: scrape the current page -> persist -> follow the "next page"
        link until no such link is visible.
        """
        while True:
            await self.context_page.wait_for_load_state()
            page_title = await self.context_page.locator('.title').text_content()
            self.logger.info(f"Downloading page: {page_title}")

            # Extract every novel entry in a single in-page evaluation to
            # avoid one Playwright round-trip per element.  (The previous
            # separate `.all()` fetch was never used and has been removed,
            # as has the unused `indexUrl` argument of the JS function.)
            novel_data = await self.context_page.locator(".topli > li").evaluate_all("""
                (elements) => {
                    return elements.map(el => {
                        const link = el.querySelector('a');
                        const span = el.querySelector('span');
                        return {
                            novel_name: link ? link.textContent : '',
                            href: link ? link.getAttribute('href') : '',
                            novel_type: span ? span.textContent : ''
                        };
                    });
                }
            """)

            self.chapter_urls: list[NovelsInfo] = []
            for data in novel_data:
                # Skip entries missing a name or a link.
                if data['novel_name'] and data['href']:
                    novel_index_url = Settings.BASE_URL + data['href']
                    self.chapter_urls.append(
                        NovelsInfo(data['novel_name'], novel_index_url, data['novel_type'])
                    )

            # Save this page's batch to the database.
            await self.store.store_novels(self.chapter_urls)

            if await self._next_page_is_exist():
                await self._go_to_next_page()
            else:
                break

    async def _next_page_is_exist(self) -> bool:
        """True while the pager's last link (the "next page" link) is visible."""
        return await self.context_page.locator(".page a:last-child").is_visible()

    async def _go_to_next_page(self):
        """Click the pager's last link to advance to the next map page."""
        await self.context_page.locator(".page a:last-child").click()


class NovelCrawler(BqgCrawler):
    """Downloads one novel: its metadata plus every chapter, stored via MongoDB."""

    def __init__(self, index_url) -> None:
        """
        Args:
            index_url: URL of the novel's index page.
        """
        super().__init__(index_url)
        self.store = BQGMongoStore()

    async def search(self):
        """Scrape the novel's metadata and all chapters, saving each to the store."""
        await self.get_metadata()
        try:
            self.store.store_novel_metadata(self.metadata)
        except Exception as e:
            self.logger.error(f"Error saving novel metadata to database: {e}")

        # Open the full chapter-list page.
        await self.context_page.locator("body > div.books > div.book_more > a").click()
        await self.context_page.wait_for_load_state()
        # Collect every chapter link (nth-child(n+3) skips non-chapter entries).
        # Bug fix: this `.all()` fetch was previously duplicated.
        chapter_links = await self.context_page.locator(
            ".book_last > dl > dd:nth-child(n+3) > a"
        ).all()
        chapter_urls = []
        for link in chapter_links:
            href = await link.get_attribute("href")
            if href:
                chapter_urls.append(Settings.BASE_URL + href)

        # Download chapters concurrently, bounded by MAX_CONCURRENT_PAGES.
        semaphore = asyncio.Semaphore(Settings.MAX_CONCURRENT_PAGES)

        async def process_chapter(chapter_url: str, count: int):
            """Fetch one chapter (and its continuation pages) in a fresh page."""
            async with semaphore:
                page = await self.browser_context.new_page()
                try:
                    await page.goto(chapter_url)
                    await page.wait_for_load_state()
                    chapter_title = await page.locator('.title').text_content()
                    self.logger.info(f"Downloading chapter: {chapter_title}")
                    content = await page.locator("#chaptercontent").text_content()
                    # A chapter may be split across "<id>_2.html"-style pages that
                    # share the same URL stem; follow #pb_next while the stem
                    # matches and concatenate the content.  ([:-5] strips ".html".)
                    pattern = chapter_url.split('/')[-1].split('_')[0][:-5]
                    next_pattern = await page.locator("#pb_next").first.get_attribute('href')
                    # Bug fix: guard against get_attribute returning None,
                    # which previously raised AttributeError on .split().
                    while next_pattern and pattern == next_pattern.split('/')[-1].split('_')[0]:
                        await page.locator("#pb_next").first.click()
                        await page.wait_for_load_state()
                        content += await page.locator("#chaptercontent").text_content()
                        next_pattern = await page.locator("#pb_next").first.get_attribute('href')
                    if content:
                        chapter: dict = {"count": count, "title": chapter_title, "content": content}
                        try:
                            # Save to database.
                            self.store.store_novel(self.metadata, chapter=chapter)
                        except Exception as e:
                            self.logger.error(f"Error saving chapter to database: {e}")
                except Exception as e:
                    self.logger.error(f"Error processing chapter {chapter_url}: {e}")
                finally:
                    await page.close()

        tasks = [process_chapter(url, count + 1) for count, url in enumerate(chapter_urls)]
        await asyncio.gather(*tasks)

    async def _is_next_exist(self):
        """True if the reading page's "next" link is visible."""
        return await self.context_page.locator("div.Readpage:nth-child(5) > a:nth-child(3)").is_visible()

    async def _goto_next(self):
        """Click the reading page's "next" link."""
        await self.context_page.locator("div.Readpage:nth-child(5) > a:nth-child(3)").click()

    async def get_metadata(self):
        """Read title and author from the current page into ``self.metadata``."""
        title = await self.context_page.locator(".name").text_content()
        author = await self.context_page.locator("dd.dd_box:nth-child(2) > span:nth-child(1)").text_content()
        self.metadata = {"title": title, "author": author}

    async def get_chapter_urls(self) -> List[str]:
        """Alternative chapter-URL collector: click through pages via #pb_next.

        NOTE(review): not called by search(); kept for compatibility.  The loop
        records #pb_next's href before clicking and stops once the href no
        longer ends in 'html'.
        """
        urls = []
        first_link = self.context_page.locator("body > div.book_last > dl > dd:nth-child(3) > a")
        urls.append(await first_link.get_attribute("href"))
        await first_link.click()
        while True:
            await self.context_page.wait_for_selector("#pb_next")
            tag_a = self.context_page.locator("#pb_next").first
            urls.append(await tag_a.get_attribute('href'))
            await tag_a.click()
            href = await tag_a.get_attribute('href')
            self.logger.info(f"获取到章节链接: {href}")
            if href and href[-4:] != 'html':
                break
        return [Settings.BASE_URL + url for url in urls]