import scrapy
from scrapy import Request
from ..items import ProductItem
import re

class SpiderSpider(scrapy.Spider):
    """Crawl www.dahou2010.com's product center.

    Flow: landing page -> category links -> paginated listing pages ->
    product detail pages, yielding one ``ProductItem`` per product.
    """

    name = "spider"
    allowed_domains = ["www.dahou2010.com"]
    start_urls = ['http://www.dahou2010.com/category/%e4%ba%a7%e5%93%81%e4%b8%ad%e5%bf%83']  # product-center landing page

    def start_requests(self):
        # Route the landing page(s) through get_category instead of the
        # default parse() callback.
        for url in self.start_urls:
            yield Request(
                url=url,
                callback=self.get_category
            )

    def get_category(self, response):
        """Parse the landing page and yield one request per category.

        Each request carries ``category_name``/``category_url`` in its meta
        so downstream callbacks can tag the items they produce.
        """
        ul = response.xpath('/html/body/div/div/div[2]/div[1]/div/div/div/div[2]/div/div/div/div[2]/div/ul')
        # The first two <li> are not categories; iterate the rest in reverse.
        li = ul.xpath("./li[position() >= 3]")[::-1]

        categories = []  # [(category_name, category_url), ...]
        for entry in li:
            link = entry.xpath("./div/a")
            category_url = link.xpath("./@href").get()
            category_name = link.xpath("./span/text()").get()
            # FIX: .get() returns None when the node is missing; guard before
            # calling .strip() to avoid an AttributeError on malformed entries.
            if category_url and category_name:
                categories.append((category_name.strip(), category_url.strip()))

        for category_name, category_url in categories:
            yield Request(
                url=category_url,
                callback=self.paging,
                meta={
                    'category_name': category_name,
                    'category_url': category_url
                }
            )

        self.logger.info(f"Found {len(categories)} categories")
        self.logger.info("Processing links")

    def paging(self, response):
        """Read the pagination bar and request every listing page in turn."""
        page_bar = response.css(".is-background")
        # FIX: the original left `last_page` unbound when the pagination
        # element was missing (the except branch only logged), so the
        # following `if last_page` raised UnboundLocalError. Check for None
        # explicitly and bail out instead.
        last_page = page_bar.xpath("./ul/li[last()]/a/text()").get()
        if last_page is None:
            self.logger.info("no last_page element")
            return
        last_page = last_page.strip()

        if last_page and last_page.isdigit():
            max_page = int(last_page)
            for i in range(1, max_page + 1):
                page_url = f"{response.url}?page={i}"
                self.logger.info(f"start to request {response.meta['category_name']} page: {i}")
                yield Request(
                    url=page_url,
                    callback=self.parse_index,
                    # Forward the whole meta so category info reaches parse_index.
                    meta=response.meta
                )

    def parse_index(self, response):
        """Parse one listing page and request every product detail page."""
        index = response.xpath("/html/body/div/div/div[2]/div[2]/div/div/div")
        # One child <div> per product card on the listing page.
        indices = index.xpath("./div/div/div[2]/div")

        products: list[tuple] = []  # [(product_name, product_url), ...]
        for card in indices:
            href = card.xpath("./div/div[2]/div/a/@href").get()
            text = card.xpath("./div/div[2]/div/a/text()").get()
            # FIX: .get() may return None for cards without a link; the
            # original called .strip() first and crashed with AttributeError.
            if href is not None and text is not None:
                href, text = href.strip(), text.strip()
                if href and text:
                    products.append((text, href))

        for product_name, product_url in products:
            self.logger.info(f"processing detail —— {product_name}: {product_url}")
            yield Request(   # request the detail page
                url=product_url,
                callback=self.parse_detail,
                meta={
                    "category_name": response.meta["category_name"],
                    "product_name": product_name,
                    "product_url": product_url
                }
            )

    @staticmethod
    def feature_fetch(feature):
        """Extract a (name, value) pair from one feature <p> paragraph.

        Handles the several markup layouts seen on the site (bold name with
        plain value, "name：value" packed into the bold text, value-only
        paragraphs). Returns ``(None, None)`` when the paragraph is blank or
        matches no known layout.
        """
        # Reject paragraphs whose text collapses to nothing (incl. &nbsp;).
        text_content = ''.join(feature.xpath(".//text()").getall())
        if not text_content or not text_content.strip().replace('\xa0', '').replace('&nbsp;', '').strip():
            return None, None

        # The feature name is usually bold; try the known nesting variants.
        feature_name = (
            feature.xpath("./strong/span/text()").get() or
            feature.xpath("./strong/text()").get() or
            feature.xpath("./span/strong/text()").get()
        )
        feature_name = feature_name.strip() if feature_name else None

        # FIX: the original's third fallback ''.join(feature.xpath(...))
        # joined Selector objects (TypeError if reached) and merely duplicated
        # the second fallback without .getall(); it has been dropped.
        feature_value = (
            ''.join(feature.xpath("./text()").getall()) or
            ''.join(feature.xpath("./span/text()").getall())
        )
        feature_value = feature_value.strip() if feature_value else None

        # "name：value" packed into the bold text: split on the first
        # full-width colon. FIX: the original indexed split(...)[1]
        # unconditionally, raising an uncaught IndexError when no colon was
        # present; guard on the number of parts instead.
        if not feature_value and feature_name:
            parts = feature_name.split("：", 1)
            if len(parts) == 2 and parts[1].strip():
                feature_name = parts[0].strip()
                feature_value = parts[1].strip()

        if feature_value and feature_name is None:
            # No bold element matched; fall back to the bare text node.
            pure_text = feature.xpath("./text()").get()
            if pure_text and pure_text.strip():
                feature_name = pure_text.strip()

        # Both pieces are required; signal "unrecognized layout" otherwise
        # (the original reached the same result by raising AttributeError).
        if feature_name is None or feature_value is None:
            return None, None

        return feature_name, feature_value

    def parse_detail(self, response):
        """Build and yield a ProductItem from a product detail page."""
        item = ProductItem()
        self.logger.info(f"Processing product detail URL: {response.url}")

        # The detail content lives in the rich-text div that contains <p> tags.
        box = response.xpath("//div[@class='richtext'][p]")
        if not box:
            self.logger.warning(f"No feature box found in URL: {response.url}")
            return

        # Collect {name: value} dicts, one per recognizable feature paragraph.
        product_feature = []
        features = box.xpath("./p")
        self.logger.info(f"Found {len(features)} features in URL: {response.url}")

        for feature in features:
            feature_name, feature_value = self.feature_fetch(feature)
            if feature_name is None or feature_value is None:
                self.logger.warning(f"Different HTML structure detected in URL: {response.url}")
                continue
            product_feature.append({feature_name: feature_value})

        item["feature"] = product_feature

        # Price: first number (integer or decimal) found anywhere in the
        # price table's text; the string "None" marks a missing price.
        price_table = box.xpath("./table")
        if price_table:
            all_text = ''.join(price_table.xpath(".//text()").getall())
            price_match = re.search(r'\d+(?:\.\d+)?', all_text)
            if price_match:
                item["price"] = price_match.group()
            else:
                item["price"] = "None"
                self.logger.warning(f"No price number found in table: {response.url}")
        else:
            self.logger.warning(f"There is no price table: {response.url}")
            item["price"] = "None"

        # Carry the identity fields through from the request meta.
        item["category"] = response.meta["category_name"]
        item["url"] = response.meta["product_url"]
        item["name"] = response.meta["product_name"]

        self.logger.info(item)

        yield item











