import scrapy
from BS.items import BookItem
import re
from urllib.parse import urljoin
from scrapy_splash import SplashRequest
import random


class IxinqingSpider(scrapy.Spider):
    """Crawl book listings on ixinqing.com and scrape per-book detail pages.

    Listing pages are plain HTML and are fetched with ordinary Scrapy
    requests; the book detail pages render the price block with JavaScript,
    so they are routed through a Splash `execute` endpoint running a small
    Lua script that waits for the JS to finish.
    """

    name = 'ixinqing_spider'
    # 'localhost' is allowed so responses proxied through the Splash Docker
    # container pass the offsite filter.
    allowed_domains = ['www.ixinqing.com', 'localhost']
    start_urls = ['http://www.ixinqing.com/book/hot']  # listing-page entry point

    # Lua script used to render detail pages (listing pages don't need it).
    # NOTE: this is a runtime string sent to Splash — kept byte-identical.
    splash_script = """
    function main(splash, args)
        splash:set_user_agent(splash.args.ua)
        assert(splash:go(args.url))
        assert(splash:wait(0.5))  -- 等待0.5秒让JS执行
        return {
            html = splash:html(),
            url = splash:url()
        }
    end
    """

    # Per-spider settings override.
    custom_settings = {
        'DEPTH_LIMIT': 400,
    }

    def parse(self, response):
        """Parse a listing page: schedule Splash detail requests, follow pagination.

        :param response: listing-page response from a plain Scrapy request.
        :yields: SplashRequest per unique book link, plus one Request for
            the next listing page when present.
        """
        book_links = response.xpath('//h4[@class="mt-0"]/a/@href').getall()
        self.logger.info(f"提取到 {len(book_links)} 个书籍链接")

        # getlist() returns [] (not None) when USER_AGENTS is missing from
        # the project settings, so we can fall back to a sane default UA
        # instead of crashing random.choice with a TypeError.
        user_agents = self.settings.getlist('USER_AGENTS') or [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
        ]

        seen_links = set()
        for link in book_links:
            # De-duplicate within the page and keep only real book paths.
            if not link or '/book/' not in link or link in seen_links:
                continue
            seen_links.add(link)
            yield SplashRequest(
                urljoin(response.url, link),
                callback=self.parse_book,
                endpoint='execute',
                args={
                    'lua_source': self.splash_script,
                    'ua': random.choice(user_agents),
                    'timeout': 60,
                    'images': 0,  # skip image loading to speed up rendering
                },
                # FIX: the original also set dont_retry=True, which disables
                # RetryMiddleware entirely and made max_retry_times dead.
                # The explicit retry budget indicates retries were intended.
                meta={'max_retry_times': 2},
            )

        # Pagination is static HTML, so a plain (non-Splash) request suffices.
        next_page = response.xpath('//div[@id="pagination"]//li[@class="next"]/a/@href').get()
        if next_page:
            yield scrapy.Request(urljoin(response.url, next_page), callback=self.parse)

    def parse_book(self, response):
        """Build a BookItem from a Splash-rendered book detail page.

        :param response: SplashJsonResponse whose ``data['html']`` holds the
            rendered DOM returned by the Lua script.
        :yields: one populated BookItem, or nothing on 404 / render failure.
        """
        # A 404 cannot contain book data — bail out before any parsing work.
        if response.status == 404:
            self.logger.info(f'Book page 404: {response.url}')
            return

        # Splash signals render failure by omitting the 'html' key.
        if 'html' not in response.data:
            self.logger.error(f'Splash 无法渲染页面: {response.url}')
            return

        html = response.data['html']
        self.logger.info(f"渲染后的HTML长度：{len(html) if html else 0}")
        # Wrap the rendered HTML so the usual XPath API applies.
        selector = scrapy.Selector(text=html)

        item = BookItem()
        item['url'] = response.url

        # Title: only the first text node of <h1>, dropping nested sub-spans.
        # FIX: the original never assigned item['title'] when <h1> existed
        # but lacked a leading text node; always fall back to the default.
        main_title = selector.xpath('//h1/text()[1]').get()
        title = main_title.strip() if main_title else ""
        item['title'] = title if title else "未知"

        # Author / publisher are the 2nd text node of the 1st / 4th info row.
        item['author'] = self._info_field(selector, 1)
        item['publisher'] = self._info_field(selector, 4)

        # Price is the JS-rendered part — the reason Splash is needed.
        item['price'] = self._extract_price(selector)

        # Tags: every /tag/ link in the sidebar column.
        tags = selector.xpath(
            "//div[contains(@class, 'col-md-3')]//a[contains(@href, '/tag/')]/text()"
        ).getall()
        item['tags'] = ', '.join(tag.strip() for tag in tags) if tags else "未分类"

        # Favourite / recommend counters render as "… (N)" link text.
        item['fav_count'] = self._parenthesized_count(selector, '/fav/book')
        item['commend_count'] = self._parenthesized_count(selector, '/commend/book')

        # Summary: full text of the expanded description block, whitespace
        # collapsed; falls back to an alternate container, then a default.
        summary_nodes = selector.xpath("//div[@class='all']")
        if summary_nodes:
            summary_text = ''.join(summary_nodes.xpath(".//text()").getall()).strip()
            item['summary'] = re.sub(r'\s+', ' ', summary_text)
        else:
            alt_summary = selector.xpath(
                "//div[contains(@class, 'book-summary')]/text()"
            ).get()
            item['summary'] = alt_summary.strip() if alt_summary else "未知"

        yield item

    @staticmethod
    def _info_field(selector, position, default="未知"):
        """Return the stripped 2nd text node of the Nth 'col-md-6 mb-15' row.

        The first text node of each info row is its label; the value is the
        second node. Returns *default* when the row or value is missing.
        """
        nodes = selector.xpath(
            f"//div[contains(@class, 'col-md-6 mb-15')][{position}]/text()"
        )
        if nodes and len(nodes) > 1:
            return nodes[1].get().strip()
        return default

    @staticmethod
    def _extract_price(selector, default="未知"):
        """Pull the numeric JD price out of the rendered buy box, or *default*."""
        price_text = selector.xpath(
            "//div[@class='buy']//p[@class='mb-5 text-muted'][1]/a/text()"
        ).get()
        if price_text:
            match = re.search(r'￥(\d+\.?\d*)', price_text.strip())
            if match:
                return match.group(1)
        return default

    @staticmethod
    def _parenthesized_count(selector, href, default="0"):
        """Extract the "(N)" counter from the link whose @href equals *href*."""
        text = selector.xpath(f"//a[@href='{href}']/text()").get()
        if text:
            match = re.search(r'\((\d+)\)', text.strip())
            if match:
                return match.group(1)
        return default