from itertools import product


import scrapy
from playwright.sync_api import sync_playwright
from scrapy import Request
from scrapy_playwright.page import PageMethod
# from coursera_spider.items import CourseItem
import json
from urllib.parse import urlencode
import re
from datetime import datetime

from product_spider.items import LazadaProductItem


class LazadaSpider(scrapy.Spider):
    """Crawl Lazada PH search results for "Portable Speakers" via scrapy-playwright.

    Flow:
      1. ``start`` issues one Playwright-rendered request for the search landing page.
      2. ``parse_total_pages`` reads the pagination widget, then fans out one AJAX
         request per result page.
      3. ``parse_search_results`` parses each AJAX JSON payload into
         ``LazadaProductItem`` objects.

    A manual slider-captcha pause (``handle_captcha_manual``) is built in; it blocks
    on ``input()``, so this spider is meant for attended, headful runs.
    """

    name = 'lazada'
    # Feed export: dump scraped items into courses.json as indented JSON.
    custom_settings = {
        'FEEDS': {
            'courses.json': {
                'format': 'json',
                'encoding': 'utf8',
                'store_empty': False,
                'indent': 4,
            },
        },
    }

    async def start(self):
        """Yield the initial request only; all Playwright work happens in callbacks."""
        # Landing page used solely to discover the total page count.
        base_url = "https://www.lazada.com.ph/catalog/?q=Portable%20Speakers&from=hp_categories&src=all_channel"

        yield Request(
            url=base_url,
            callback=self.parse_total_pages,
            meta={
                "playwright": True,
                # We need the live page object in the callback to inspect pagination.
                "playwright_include_page": True,
                "playwright_page_methods": [
                    # Desktop-sized viewport so the full pagination widget renders.
                    ("set_viewport_size", {"width": 1920, "height": 1080}),
                ],
            },
            errback=self.errback_close_page,
        )

    async def parse_total_pages(self, response):
        """Read the total page count from the rendered page, then schedule one
        AJAX request per result page."""
        page = response.meta.get("playwright_page")
        if not page:
            return

        # Give the operator a chance to solve a slider captcha before parsing.
        await self.handle_captcha_manual(page)
        await page.wait_for_load_state("networkidle")

        total_pages = await self.get_total_pages(page)
        self.logger.info("Total result pages: %s", total_pages)

        # Pages are 1-based; range upper bound is exclusive, so +1 covers them all.
        # (Previous code used +2 and requested one page past the end.)
        for current_page in range(1, total_pages + 1):
            params = {
                'ajax': 'true',
                'page': current_page,
                'q': 'Portable Speakers',
                'src': 'all_channel',
                'service': 'all_channel',
                'isFirstRequest': 'true',
            }
            url = "https://www.lazada.com.ph/catalog/?" + urlencode(params)
            self.logger.debug("Scheduling page %d: %s", current_page, url)

            yield Request(
                url=url,
                callback=self.parse_search_results,
                # Same endpoint with differing query strings; bypass the dupefilter.
                dont_filter=True,
                meta={
                    "playwright": True,
                    "playwright_include_page": True,
                    # Consumed by errback_close_page for per-page failure logging.
                    "current_page": current_page,
                },
                errback=self.errback_close_page,
            )

        # Brief grace period before releasing the landing page's browser tab.
        await page.wait_for_timeout(10000)
        await page.close()

    async def parse_search_results(self, response):
        """Parse one AJAX search-results page (JSON body) into product items."""
        page = response.meta.get("playwright_page")
        if not page:
            return

        try:
            await page.wait_for_load_state("networkidle")

            # The ajax=true endpoint returns JSON; products live under mods.listItems.
            product_datas = json.loads(response.text)
            product_items = product_datas.get("mods", {}).get("listItems", [])

            for product in product_items:
                item = LazadaProductItem()
                item['id'] = product.get('id', '')
                # itemUrl arrives protocol-relative ("//www.lazada..."); only
                # prefix a scheme when there actually is a URL.
                raw_url = product.get('itemUrl', '')
                item['itemUrl'] = 'https:' + raw_url if raw_url else ''
                item['name'] = product.get('name', '')
                item['productSell'] = product.get('itemSoldCntShow', '')
                item['brandId'] = product.get('brandId', '')
                item['brandName'] = product.get('brandName', '')
                item['categories'] = product.get('categories', '')
                item['image'] = product.get('image', '')
                yield item

        except Exception as e:
            self.logger.error(f"解析失败: {e}")
        finally:
            # Always release this request's dedicated page object.
            await page.close()

    async def handle_response(self, response):
        """Playwright response-event hook: log catalog AJAX payloads (debug aid)."""
        if "/catalog/?ajax=true" in response.url:
            goods_data = await response.json()
            self.logger.debug("Catalog AJAX response from %s: %s", response.url, goods_data)

    async def handle_captcha_manual(self, page):
        """Pause for manual slider-captcha solving; no-op when no captcha appears.

        NOTE(review): ``input()`` blocks the whole event loop, so this only suits
        attended, headful runs.
        """
        try:
            # 10s window to detect either known captcha container.
            await page.wait_for_selector('.captcha-container, .slide-verify', timeout=10000)
            self.logger.warning("\n===== 检测到滑块验证码 =====")
            self.logger.warning("请在浏览器中手动完成滑块验证")
            self.logger.warning("验证完成后请回到终端按 Enter 继续...")

            # Block until the operator confirms the captcha is solved.
            input("按 Enter 键继续...")
            self.logger.info("已确认手动验证完成，继续爬取...")
            await page.wait_for_timeout(1000)  # let the page settle after verification

        except Exception:
            # Selector timed out — no captcha shown; carry on.
            return

    async def get_total_pages(self, page):
        """Return the last page number from the antd pagination widget, or 1."""
        try:
            pagination = await page.query_selector("ul.ant-pagination")
            if pagination:
                last_page_button = await pagination.query_selector("li.ant-pagination-item:last-child a")
                if last_page_button:
                    last_page_text = await last_page_button.text_content()
                    return int(last_page_text.strip())
        except Exception as e:
            self.logger.warning(f"Error getting total pages: {e}")
        # Fall back to a single page when the widget is missing or unparsable.
        return 1

    async def errback_close_page(self, failure):
        """Log the failed request and release its Playwright page, if any."""
        url = failure.request.url
        current_page = failure.request.meta.get("current_page", "未知")
        self.logger.error(f"第{current_page}页请求失败: {url}，原因: {str(failure)}")
        page = failure.request.meta.get('playwright_page')
        if page:
            await page.close()
