"""
子详情页爬虫
从子详情页获取完整的HTML正文内容
"""
import logging
from typing import Optional

from spider.core.base_spider import BaseSpider
from spider.data.storage import StorageManager
from spider.models import CrawlTask, DetailItem

logger = logging.getLogger(__name__)


class SubDetailPageSpider(BaseSpider):
    """Spider for sub-detail pages: fetches and stores the full HTML body."""

    def parse_html(self, html: str) -> Optional[str]:
        """
        Parse the sub-detail page HTML (returned as-is).

        Args:
            html: Raw HTML content.

        Returns:
            The HTML content string, unchanged.
        """
        # Sub-detail pages are stored verbatim; no special parsing is needed.
        return html

    def _find_detail_item(self, url: str) -> Optional[DetailItem]:
        """
        Locate the DetailItem whose ``sub_detail_urls`` list contains *url*.

        Tries, in order:
          1. A JSONField ``__contains`` ORM lookup (fast, backend-dependent).
          2. A Python-side scan of all items (portable fallback when the ORM
             lookup raises).
          3. A normalized-URL comparison scan (when neither exact match hits).

        Args:
            url: Sub-detail page URL to look up.

        Returns:
            The matching DetailItem, or None when no item references the URL.
        """
        # NOTE(review): for a JSON *list* column, Django's __contains on
        # PostgreSQL normally expects a list (e.g. [url]); passing a plain
        # string may only work on some backends. The except branch below is
        # the portability net — confirm against the deployed database.
        try:
            item = DetailItem.objects.filter(
                sub_detail_urls__contains=url
            ).first()
            if item:
                logger.debug(f"Found detail_item via JSONField contains for {url}")
                return item
        except Exception as e:
            logger.debug(f"JSONField contains query failed: {e}, trying Python filter")
            # Portable fallback: scan every item in Python. O(n) rows, but
            # only reached when the backend rejects the JSONField lookup.
            try:
                for candidate in DetailItem.objects.all():
                    if url in (candidate.sub_detail_urls or []):
                        logger.debug(f"Found detail_item via Python filter for {url}")
                        return candidate
            except Exception as e:
                logger.warning(f"Failed to find detail item for URL {url}: {e}")

        # Last resort: compare normalized forms to tolerate trivial URL
        # differences (whatever DetailItem._normalize_url canonicalizes).
        try:
            normalized_url = DetailItem._normalize_url(url)
            for candidate in DetailItem.objects.all():
                normalized_item_urls = [
                    DetailItem._normalize_url(u)
                    for u in (candidate.sub_detail_urls or [])
                ]
                if normalized_url in normalized_item_urls:
                    logger.debug(f"Found detail_item via normalized URL match for {url}")
                    return candidate
        except Exception as e:
            logger.debug(f"Normalized URL match failed: {e}")

        return None

    def crawl_and_save(
        self,
        url: str,
        detail_item: Optional[DetailItem] = None
    ) -> bool:
        """
        Crawl a sub-detail page and persist its raw HTML.

        Args:
            url: Sub-detail page URL.
            detail_item: Associated detail item, if already known by the caller.

        Returns:
            True if the page was fetched and saved, False otherwise.
        """
        # Resolve the owning detail item when the caller did not supply one.
        if not detail_item:
            detail_item = self._find_detail_item(url)

        # Even without an owner we still crawl — the HTML is worth keeping.
        if not detail_item:
            logger.warning(f"Detail item not found for sub detail URL: {url}, will continue crawling anyway")

        # The owning detail-page task (if any) becomes this task's parent.
        parent_task = detail_item.task if detail_item else None
        task = self.storage_manager.get_or_create_task(
            url,
            'sub_detail',
            'crawling',
            parent_task=parent_task
        )

        try:
            # Fetch the page; fetch_with_retry returns a falsy value on failure.
            response = self.fetch_with_retry(url)
            if not response:
                self.storage_manager.update_task_status(
                    task,
                    'failed',
                    'Failed to fetch HTML'
                )
                return False

            html_content = response.text

            # Storage accepts a None detail_item, so save unconditionally.
            self.storage_manager.save_sub_detail_item(
                url,
                html_content,
                detail_item,
                task
            )

            self.storage_manager.update_task_status(task, 'crawled')
            logger.info(f"Saved sub detail item: {url}")

            return True

        except Exception as e:
            # Record the failure on the task so the crawl can be retried later.
            error_msg = str(e)
            logger.error(f"Failed to crawl sub detail page {url}: {error_msg}")
            self.storage_manager.update_task_status(task, 'failed', error_msg)
            return False

