import re
import time
from pathlib import Path

from lxml import etree
from scrapy import Spider, Request
from scrapy_playwright.page import PageMethod

from ..items import FirstItem


class ScrollSpider(Spider):
    """Paginate through Wikimedia Commons search results with Playwright.

    Starts from a search query, walks every result page by clicking the
    "Next 20 results" control on the live browser page, and follows each
    ``File:`` link to extract the full-resolution image URL into a
    :class:`FirstItem` for the images pipeline.
    """

    name = "scroll"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "DOWNLOAD_HANDLERS": {
            "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
            "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
        },
        "LOG_LEVEL": "INFO",
    }

    def start_requests(self):
        """Issue the initial search request with Playwright enabled."""
        yield Request(
            url="https://commons.wikimedia.org/w/index.php?search=Google+art",
            cookies={"foo": "bar", "asdf": "qwerty"},
            meta={
                "playwright": True,
                "playwright_include_page": True,
                "playwright_page_methods": [
                    # Wait until the result container is rendered before parsing.
                    PageMethod("wait_for_selector", "div.mw-search-results-container")
                ],
            },
        )

    async def parse(self, response, **kwargs):
        """Walk the paginated result set, yielding one request per file page.

        Re-reads the live page's DOM after every pagination click; stops when
        no next button is found, a click fails, or ``max_pages`` is reached.

        Args:
            response: the Scrapy response for the initial search page; its
                meta carries the Playwright page object.
        """
        page = response.meta["playwright_page"]
        page_num = 1
        max_pages = 50  # safety cap to avoid an endless pagination loop

        try:
            while page_num <= max_pages:
                print(f"=== 正在处理第 {page_num} 页 ===")
                # Snapshot the current DOM; it changes after each click.
                html = await page.content()
                # 1. Extract the links on the current result page.
                items = self.extract_current_page_data(page, html)
                for item in items:
                    href = "https://commons.wikimedia.org"
                    match = re.search(r"/wiki/File:(.+)", "".join(item["url"]))
                    if match:
                        filename = match.group(1)
                        # Fix: the placeholder was a literal, not the filename.
                        print(f"文件名: {filename}")
                        href = "https://commons.wikimedia.org/wiki/File:" + filename

                    yield Request(
                        url=href,
                        cookies={"foo": "bar", "asdf": "qwerty"},
                        callback=self.parse_img,
                        meta={
                            "playwright": True,
                            "playwright_include_page": True,
                            "playwright_context_kwargs": {
                                "accept_downloads": True,  # enable downloads
                            },
                            "playwright_page_methods": [
                                PageMethod("wait_for_selector", "div.fullImageLink")
                            ],
                        },
                    )

                print(f"提取到 {len(items)} 个数据项")

                # 2. Look for a next-page control.
                next_button = await self.find_next_button(page)
                if not next_button:
                    print("没有下一页，分页结束")
                    break

                # 3. Click it and wait for the new results to load.
                if await self.click_next_button(page, next_button):
                    page_num += 1
                    # Fix: time.sleep(2) blocked the event loop; wait on the
                    # page instead so other coroutines keep running.
                    await page.wait_for_timeout(2000)
                else:
                    print("点击下一页失败")
                    break

            print(f"分页完成，共处理 {page_num} 页")
        finally:
            # Fix: page.close() is a coroutine and was never awaited, so the
            # browser page leaked. Close it even if extraction raised.
            await page.close()

    async def find_next_button(self, page):
        """Return a locator for the next-page control, or ``None`` if absent.

        ``next_selectors`` stays a list so fallback selectors can be added
        without changing the lookup loop.
        """
        next_selectors = [
            '(//div/*[@title="Next 20 results"])[1]',
        ]

        for selector in next_selectors:
            button = page.locator(selector)
            if await button.count() > 0:
                print(f"找到下一页按钮: {selector}")
                return button

        return None

    async def click_next_button(self, page, next_button):
        """Click the next-page button and wait for the results to re-render.

        Returns:
            bool: ``True`` on success, ``False`` if the button is not
            clickable or the click / subsequent wait fails.
        """
        try:
            # Only click when the button is both visible and enabled.
            if await next_button.is_visible() and await next_button.is_enabled():
                await next_button.click()

                # Fix: the wait coroutine was missing `await` and never ran.
                await page.wait_for_selector(
                    "div.mw-search-results-container", timeout=10000
                )

                print("成功点击下一页")
                return True

            print("下一页按钮不可点击")
            return False

        except Exception as e:
            # Best-effort: a failed click just ends pagination upstream.
            print(f"点击下一页时出错: {e}")
            return False

    def extract_current_page_data(self, page, response):
        """Parse search-result links out of a rendered HTML snapshot.

        Args:
            page: the Playwright page (unused; kept for interface stability).
            response: the page HTML string as returned by ``page.content()``.

        Returns:
            list[dict]: one dict per result heading link, where ``title`` and
            ``url`` each hold the raw lxml xpath result lists.
        """
        results = []
        anchors = etree.HTML(response).xpath(
            '//*[@class="mw-search-result-heading"]/a'
        )
        for anchor in anchors:
            href = anchor.xpath("./@href")
            text = anchor.xpath("./text()")
            if href:
                results.append({
                    "title": text,
                    "url": href,
                })

        return results

    async def parse_img(self, response, **kwargs):
        """Extract the original-file link from a ``File:`` page.

        Yields a :class:`FirstItem` with ``image_urls`` for the images
        pipeline, and always closes the Playwright page afterwards.
        """
        page = response.meta.get("playwright_page")
        try:
            links = etree.HTML(response.text).xpath(
                '//*[@id="mw-content-text"]/div[3]/bdi/a'
            )
            # Guard against layout changes: skip pages with no matching link
            # instead of raising IndexError.
            if links:
                hrefs = links[0].xpath("./@href")
                if hrefs:
                    im = FirstItem()
                    im["image_urls"] = [hrefs[0]]
                    yield im
        finally:
            if page:
                await page.close()







