# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
from ..base_parser import BasePageParser
from .product_detail import ProductDetailParser


class ProductListtParser(BasePageParser):
    """Parser for CCCF product-list pages.

    Yields one request per product-detail link found on the page, then a
    request for the next page as long as the current page contained data rows.

    NOTE(review): the original docstring said "news list parser", but the
    handled page_type is 'cccf_product_list' — this parses product lists.
    The class name carries a double 't' ("Listt"); kept unchanged because
    external code may reference the class by name.
    """

    def can_handle(self, response):
        """Return True only for product-list pages.

        Detail pages use a different page_type and are handled by
        ProductDetailParser.
        """
        return response.meta.get('page_type') == 'cccf_product_list'

    def parse(self, response):
        """Parse a product-list page.

        Yields:
            Requests for each product-detail page (routed straight to
            ``_parse_detail``), followed by a request for the next list
            page when the current page was not empty.
        """
        self.logger.info(f"正在解析列表页: {response.url}")

        soup = BeautifulSoup(response.text, 'html.parser')

        main_table = soup.select_one("#pageForm  table table")
        if not main_table:
            self.logger.warning("未找到主表格")
            return

        rows = main_table.select('table')

        self.logger.info(f"找到 {len(rows)} 个链接")

        # Collect every detail-page link first so the requests can be
        # emitted in one batch afterwards.
        detail_urls = []
        categorys = response.meta.get('categorys', [])

        has_items = False
        for row in rows:
            # Skip the pagination-widget row.
            if row.select_one('#pageText'):
                continue

            # Rows lacking the nested category div carry no product data.
            category_div = row.select_one('div > div')
            if category_div is None:
                continue

            has_items = True

            link = row.select_one('a')
            # BUG FIX: a data row may still contain no anchor; the original
            # crashed with AttributeError on link.get('href') in that case.
            if link is None:
                continue
            href = link.get('href')
            if href:
                detail_url = self.urljoin(response.url, href)
                self.logger.info(f"找到详情页链接: {detail_url}")

                # Breadcrumb for the detail page: inherited categories
                # + this row's category text + the link text.
                detail_categorys = categorys.copy()
                detail_categorys.append(category_div.get_text(strip=True))
                detail_categorys.append(link.get_text(strip=True))

                detail_urls.append({
                    'url': detail_url,
                    'categorys': detail_categorys
                })

        # Emit all detail-page requests in a batch to raise concurrency.
        for detail_info in detail_urls:
            yield self.make_request(
                detail_info['url'],
                callback=self._parse_detail,  # bypass parser routing
                meta={
                    'category_url': response.url,
                    'page_type': 'cccf_product_detail',
                    'categorys': detail_info['categorys'],
                }
            )

        # Pagination: keep advancing the 'page' query parameter until a
        # page yields no data rows.
        if has_items:
            from urllib.parse import urlparse, parse_qs, urlencode, urlunparse

            parsed_url = urlparse(response.url)
            query = parse_qs(parsed_url.query)
            if 'page' in query:
                try:
                    next_page = int(query['page'][0]) + 1
                except (ValueError, TypeError):
                    # Unparseable page value: assume we were on page 1.
                    next_page = 2
                query['page'] = [str(next_page)]
            else:
                # No 'page' parameter means this was the first page.
                query['page'] = ['2']

            new_query = urlencode(query, doseq=True)
            new_url = urlunparse(parsed_url._replace(query=new_query))

            self.logger.info(f"尝试下一页: {new_url}")

            yield self.make_request(
                new_url,
                callback=self.spider.parse,
                meta=response.meta
            )

    def _parse_detail(self, response):
        """Delegate detail-page parsing directly to ProductDetailParser.

        Skips the spider's generic parser routing for lower overhead;
        logs a warning if no detail parser is registered.
        """
        for parser in self.spider.parsers:
            if isinstance(parser, ProductDetailParser):
                yield from parser.parse(response)
                return

        self.logger.warning("未找到详情页解析器")
           