import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import time

from ..items import BookItem


class QidianSpiderSpider(CrawlSpider):
    """Crawl free books on www.qidian.com.

    Walks the free-book listing (pagination + the two category-filter bars),
    opens each book's detail page, and — for books whose first category is in
    ``target_categories`` — fetches every chapter.  Chapter bodies are
    rendered with a single shared Selenium Chrome instance because the reader
    page builds its text client-side, so the plain Scrapy response lacks it.

    Spider arguments:
        chromedriver_path: filesystem path to the chromedriver binary
            (pass via ``-a chromedriver_path=...``); defaults to
            ``DEFAULT_CHROMEDRIVER_PATH``.
    """

    name = "qidian_spider"
    allowed_domains = ["www.qidian.com"]
    start_urls = ["https://www.qidian.com/free/all/"]

    # Previously hard-coded inside __init__; kept as a class-level default so
    # it can be overridden per-run without editing the source.
    DEFAULT_CHROMEDRIVER_PATH = r'D:\tools\webdrivers\chromedriver-win64\chromedriver.exe'

    rules = (
        # Pagination links on the listing page.
        Rule(LinkExtractor(restrict_xpaths='//*[@id="page-container"]/div/ul/li/a'), follow=True),
        # Book detail pages.
        Rule(LinkExtractor(restrict_xpaths='//*[@id="book-img-text"]//div[@class="book-mid-info"]/h2/a'), callback="parse_book", follow=False),
        # First-level category filter links.
        Rule(LinkExtractor(restrict_xpaths='//*[@id="free-channel-wrap"]//div[@class="work-filter type-filter"]/ul/li/a'), follow=False),
        # Second-level category filter links.
        Rule(LinkExtractor(restrict_xpaths='//*[@id="free-channel-wrap"]//div[@class="sub-type"]/dl/dd/a'), follow=False),
    )

    def __init__(self, *args, chromedriver_path=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Only books whose first category attribute is in this list are kept.
        self.target_categories = ["玄幻", "奇幻", "武侠", "仙侠"]
        service = webdriver.ChromeService(
            executable_path=chromedriver_path or self.DEFAULT_CHROMEDRIVER_PATH
        )
        # One shared browser instance for the whole crawl; shut down in closed().
        self.browser = webdriver.Chrome(service=service, options=self._build_chrome_options())

    @staticmethod
    def _build_chrome_options():
        """Return Chrome options tuned to reduce automation detection."""
        chrome_options = Options()
        chrome_options.add_argument("--headless=new")  # new-style headless mode
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--enable-unsafe-swiftshader")
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument('--disable-blink-features=AutomationControlled')
        chrome_options.add_argument("--disable-usb-discovery")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument(
            'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36')
        # Hide the "controlled by automated test software" fingerprint.
        chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
        chrome_options.add_experimental_option('useAutomationExtension', False)
        return chrome_options

    def closed(self, reason):
        """Scrapy shutdown hook: quit the shared browser when the spider stops."""
        if hasattr(self, 'browser'):
            self.browser.quit()
            self.logger.info("Chrome浏览器已关闭")

    def parse_item(self, response):
        # Unused scaffold left over from the `scrapy genspider -t crawl`
        # template; no Rule routes here.  Kept so any external reference
        # to the method name keeps working.
        item = {}
        return item

    def parse_book(self, response):
        """Parse a book detail page and schedule one request per chapter.

        The mutable ``BookItem`` is shared via ``meta`` with every chapter
        request; ``parse_chapter`` fills ``chapters`` by index and yields the
        item once the last slot is populated.
        """
        self.logger.info("开始抓取书籍信息: %s", response.url)
        item = BookItem()
        item["title"] = response.xpath('//*[@id="bookName"]/text()').get()
        item["author"] = response.xpath('//*[@id="book-detail"]//span[@class="author"]/text()').get()
        item["category"] = response.xpath('//*[@id="book-detail"]//p[@class="book-attribute"]/a[1]/text()').get()
        # Skip books outside the target categories.
        if item["category"] not in self.target_categories:
            self.logger.info("跳过非目标类别书籍: %s (%s)", item["title"], item["category"])
            return
        item["intro"] = response.xpath('//*[@id="book-intro-detail"]/text()').get()
        chapter_links = response.xpath('//*[@id="allCatalog"]/div/ul/li/a/@href').getall()
        # Pre-size so each chapter can be stored at its catalogue position
        # even though chapter responses arrive out of order.
        item['chapters'] = [None] * len(chapter_links)
        self.logger.info("开始抓取书籍%s章节信息: 共%s章", item["title"], len(chapter_links))
        if not chapter_links:
            # Bug fix: a book with no chapter links scheduled zero chapter
            # requests, and the only `yield item` lived in parse_chapter —
            # such books were silently lost.  Yield them directly instead.
            yield item
            return
        for index, link in enumerate(chapter_links):
            yield response.follow(
                link,
                self.parse_chapter,
                meta={'item': item, 'index': index}
            )

    def parse_chapter(self, response):
        """Extract one chapter's title and body; yield the book when complete.

        NOTE(review): the Selenium call below blocks Scrapy's event loop for
        the page load plus one second per chapter, which caps throughput.
        """
        item = response.meta['item']
        index = response.meta['index']
        chapter_title = response.xpath('//*[@id="reader-content"]/div/div/div[2]/div/h1/text()').get()
        self.logger.info("开始抓取章节信息: %s", chapter_title)

        # Re-render the page in the shared browser: the chapter text is
        # produced client-side and is absent from the raw Scrapy response.
        self.browser.get(response.url)
        time.sleep(1)  # crude wait for client-side rendering

        # Collect the visible paragraph text rendered by the reader.
        paragraphs = self.browser.find_elements(By.XPATH, '//*/p/span[1]')
        chapter_content = '\n'.join(p.text for p in paragraphs if p.text.strip())
        self.logger.info("已抓取第%s章节内容: %s", index + 1, chapter_title)

        # Store at the original catalogue position (responses are unordered).
        item['chapters'][index] = {
            'title': chapter_title,
            'content': chapter_content
        }

        # Yield only once every slot is filled.  NOTE(review): if any single
        # chapter request fails or is dupe-filtered, the item is never
        # yielded — consider adding an errback if completeness matters.
        if None not in item['chapters']:
            yield item