import scrapy
from scrapy.http import HtmlResponse, Request
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


class ExampleSpider(scrapy.Spider):
    """Scrape the ShanghaiRanking BCUR 2025 table.

    Each page's rows are parsed from the (Selenium-rendered) response;
    pagination is driven by clicking the "next page" button through a
    Selenium WebDriver instance expected in ``response.request.meta['driver']``
    (presumably attached by a scrapy-selenium-style downloader middleware —
    verify against the project's middleware configuration).
    """

    name = "example"
    start_urls = ["https://www.shanghairanking.cn/rankings/bcur/2025"]  # replace with the actual target site

    custom_settings = {
        # Path to the local chromedriver binary; change to your own path.
        'SELENIUM_DRIVER_EXECUTABLE_PATH': 'C:\\Users\\apps\\chromedriver-win64\\chromedriver.exe',
        # A single Selenium-driven browser cannot serve parallel requests.
        'CONCURRENT_REQUESTS': 1,
    }

    def start_requests(self):
        """Issue the initial requests, flagged for Selenium rendering."""
        for url in self.start_urls:
            yield Request(
                url=url,
                callback=self.parse,
                meta={'use_selenium': True, 'page': 1},  # initial page number
            )

    def parse(self, response: HtmlResponse, **kwargs):
        """Yield one item per table row, then click through to the next page.

        Yields dict items with ``name``/``price``/``description``/``url`` keys,
        followed by a continuation ``Request`` for the next page (if any).
        """
        # Parse straight off the response; wrapping it in scrapy.Selector is redundant.
        rows = response.css('#content-box > div.rk-table-box > table > tbody > tr')
        for row in rows:
            # ``or ''`` guards against a missing cell: .get() returns None in
            # that case, and calling .strip() on None would raise AttributeError.
            yield {
                'name': (row.css('span.name-cn::text').get() or '').strip(),
                'price': (row.css('td.align-left + td::text').get() or '').strip(),
                'description': (row.css('td.align-left + td + td::text').get() or '').strip(),
                'url': (row.css('td.align-left + td + td + td::text').get() or '').strip(),
            }

        # Selenium driver for pagination; may be absent if the rendering
        # middleware did not attach one to this request's meta.
        driver = response.request.meta.get('driver')
        if driver is None:
            self.logger.warning("No Selenium driver in request meta; cannot paginate")
            return

        # Locate the "next page" button. element_to_be_clickable only ever
        # returns an *enabled* element, so on the last page this wait times
        # out — a TimeoutException, not a disabled button, is the stop signal.
        try:
            next_button = WebDriverWait(driver, 10).until(
                EC.element_to_be_clickable((By.XPATH, '//*[@id="content-box"]/ul/li[@title="下一页"]/a'))
            )
        except TimeoutException:
            self.logger.info("到达最后一页，停止翻页")
            return

        next_button.click()

        # Wait for the new page's table rows to load.
        WebDriverWait(driver, 15).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#content-box > div.rk-table-box > table > tbody > tr'))
        )

        # Re-enter parse() on the post-click URL. Keep use_selenium set so
        # middleware keyed on it keeps rendering pages after the first one.
        yield Request(
            url=driver.current_url,
            callback=self.parse,
            meta={'use_selenium': True, 'driver': driver},  # pass the driver along
            dont_filter=True,  # required: the URL may be unchanged between pages
        )
