import scrapy
from items import NovelItem

class BqgScrapySpider(scrapy.Spider):
    """Spider for a novel-listing site rendered through scrapy-playwright.

    The entry URL comes from the ``MAP_URL`` setting. Each listing page is
    parsed with Scrapy selectors; pagination is driven by clicking the
    site's "next page" button inside the Playwright-controlled browser.
    """

    name = 'bqg_scrapy'  # unique spider identifier

    def start_requests(self):
        """Entry point of the crawl (replaces the legacy start() / __init__).

        Yields a single Playwright-backed request for the listing page
        configured via the ``MAP_URL`` setting.

        Raises:
            ValueError: if the ``MAP_URL`` setting is missing/empty, instead
                of letting scrapy.Request fail later with a cryptic error.
        """
        start_url = self.settings.get('MAP_URL')
        if not start_url:
            raise ValueError("MAP_URL setting is not configured")
        self.logger.info(f"Starting crawl with URL: {start_url}")

        yield scrapy.Request(
            url=start_url,
            callback=self.parse_map_page,
            meta={
                "playwright": True,
                "playwright_include_page": True,
                "playwright_page_init_callback": self.page_init_callback,
                "playwright_context": "default",
            }
        )

    async def page_init_callback(self, page, request=None):
        """Inject stealth.min.js before navigation to avoid bot detection.

        NOTE: scrapy-playwright invokes ``playwright_page_init_callback``
        with ``(page, request)``; the original signature only accepted
        ``page`` and therefore raised TypeError when the framework called
        it. ``request`` defaults to None to stay backward-compatible with
        any direct single-argument callers.
        """
        try:
            with open("libs/stealth.min.js", 'r', encoding='utf-8') as f:
                js_script = f.read()
            await page.add_init_script(script=js_script)
        except FileNotFoundError:
            self.logger.warning("stealth.min.js not found. Proceeding without it.")

    async def parse_map_page(self, response):
        """Parse one novel-listing page and schedule the next page, if any.

        Items are extracted from the rendered HTML with Scrapy selectors;
        pagination clicks the next-page button via Playwright and then
        re-requests the resulting URL so the page flows through the normal
        Scrapy pipeline.
        """
        page = response.meta["playwright_page"]
        self.logger.info(f"Parsing page: {response.url}")

        # 1. Extract the novel list from the current page with Scrapy selectors.
        for li in response.css(".topli > li"):
            novel_item = NovelItem()
            novel_item['title'] = li.css('a::text').get()
            relative_url = li.css('a::attr(href)').get()
            # Guard against a missing href: urljoin(None) would raise.
            novel_item['url'] = response.urljoin(relative_url) if relative_url else None
            novel_item['category'] = li.css('span::text').get()

            if novel_item['title'] and novel_item['url']:
                yield novel_item

        # 2. Use Playwright to drive pagination.
        try:
            next_page_button = page.locator(".page a:last-child")
            if await next_page_button.is_visible():
                self.logger.info("Next page button found, clicking...")
                await next_page_button.click()
                await page.wait_for_load_state("load", timeout=30000)

                # Re-request the post-click URL so it is parsed normally.
                yield scrapy.Request(
                    url=page.url,
                    callback=self.parse_map_page,
                    # JS-driven pagination may leave the URL unchanged, and
                    # the dupe filter would silently drop the request.
                    dont_filter=True,
                    meta={
                        "playwright": True,
                        "playwright_include_page": True,
                        "playwright_page_init_callback": self.page_init_callback,
                        "playwright_context": "default",
                    }
                )
            else:
                self.logger.info("No next page button found. Ending crawl.")
        except Exception as e:
            self.logger.error(f"Error during pagination: {e}")
        finally:
            # Always release the Playwright page; the original leaked it on
            # the successful next-page path (only else/except closed it).
            await page.close()
