"""
详情页爬虫
从详情页提取项目编号、信息来源、子详情页链接
"""
import logging
import re
from typing import Dict, Any, Optional, List
import time
from bs4 import BeautifulSoup
import hashlib
from urllib.parse import urljoin, urlparse, urlunparse

from spider.core.base_spider import BaseSpider
from spider.data.storage import StorageManager
from spider.models import CrawlTask, ListItem, DetailItem
from spider.config.settings import BASE_URL

logger = logging.getLogger(__name__)


class DetailPageSpider(BaseSpider):
    """详情页爬虫"""
    
    def __init__(self, *args, **kwargs):
        """Initialize the spider; all arguments are forwarded to BaseSpider."""
        super().__init__(*args, **kwargs)
        # Root URL used to resolve relative sub-detail links.
        self.base_url = BASE_URL
    
    def parse_html(self, html: str) -> Optional[Dict[str, Any]]:
        """Parse a detail page into its project number, info source and
        sub-detail URLs.

        Returns None when no project number can be extracted or parsing
        fails entirely.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')
            number = self._extract_project_number(soup)
            if not number:
                # One retry with the lxml parser: some pages only parse
                # correctly with it.  lxml may be missing, hence the guard.
                try:
                    soup = BeautifulSoup(html, 'lxml')
                    number = self._extract_project_number(soup)
                except Exception:
                    number = None
            if not number:
                return None
            return {
                'project_number': number,
                'info_source': self._extract_info_source(soup),
                'sub_detail_urls': self._extract_sub_detail_urls(soup),
            }
        except Exception as e:
            logger.error(f"Failed to parse detail page HTML: {e}")
            return None
    
    def _extract_project_number(self, soup: BeautifulSoup) -> Optional[str]:
        """Locate the project number ("项目编号") on the page.

        Three passes, in order: the flattened page text, a set of likely CSS
        selectors, and the elements following the first h1/h2/h3 header.
        Returns None when nothing plausible is found.
        """
        labels = [
            '项目编号',
            '采购项目编号',
            '招标编号',
            '采购编号',
            '政府采购项目编号',
            '招标项目编号',
        ]

        def labelled_value(text: str) -> Optional[str]:
            # First "label: value" match whose value is long enough (>= 5
            # chars) to plausibly be a real project number.
            for label in labels:
                m = re.search(rf'{label}[：:]\s*([A-Za-z0-9\-\[\]\(\)_/]+)', text)
                if m:
                    candidate = m.group(1).strip()
                    if len(candidate) >= 5:
                        return candidate
            return None

        # Pass 1: scan the whole page text.
        found = labelled_value(soup.get_text(separator=' ', strip=True))
        if found:
            return found

        # Pass 2: probe a handful of likely CSS selectors.
        for selector in (
            '.project-number',
            '.project-code',
            '#projectNumber',
            'td:-soup-contains("项目编号")',
            'td:-soup-contains("采购项目编号")',
            'td:-soup-contains("招标编号")',
        ):
            try:
                element = soup.select_one(selector)
                if not element:
                    continue
                element_text = element.get_text(strip=True)
                found = labelled_value(element_text)
                if found:
                    return found
                # No labelled match: accept any long enough code-like token.
                bare = re.search(r'([A-Za-z0-9\-\[\]\(\)_/]{8,})', element_text)
                if bare:
                    return bare.group(1).strip()
            except Exception:
                continue

        # Pass 3: walk up to 10 nodes after the first header element.
        header = soup.find(['h1', 'h2', 'h3'])
        if header:
            node = header.find_next()
            for _ in range(10):
                if not node:
                    break
                text = node.get_text(strip=True) if hasattr(node, 'get_text') else str(node).strip()
                found = labelled_value(text)
                if found:
                    return found
                node = node.find_next()
        return None
    
    def _extract_info_source(self, soup: BeautifulSoup) -> str:
        """Extract the information source ("信息来源") of the page.

        Tries labelled text patterns first, then a few CSS selectors.
        Returns an empty string when nothing matches.
        """
        patterns = [
            r'信息来源[：:]\s*([^\n]+)',
            r'来源[：:]\s*([^\n]+)',
        ]

        text = soup.get_text()
        for pattern in patterns:
            match = re.search(pattern, text)
            if match:
                return match.group(1).strip()

        # Fallback: likely CSS selectors.  The pseudo-class must be
        # ':-soup-contains' — the legacy ':contains' form raises in current
        # soupsieve, so that selector was silently never matching (the
        # exception was swallowed below).
        selectors = [
            '.info-source',
            '.source',
            'td:-soup-contains("信息来源")',
        ]

        for selector in selectors:
            try:
                element = soup.select_one(selector)
                if element:
                    return element.get_text(strip=True)
            except Exception:
                continue

        return ''
    
    def _normalize_sub_detail_url(self, url: str) -> Optional[str]:
        """Normalize a sub-detail URL to the canonical form
        ``https://www.ggzy.gov.cn/information/html/b/...``.

        Args:
            url: Raw URL (relative or absolute).

        Returns:
            The canonical URL, or None when the URL does not contain the
            ``/html/b/`` marker.  Query string and fragment are dropped.
        """
        # Canonical domain for sub-detail pages.
        SUB_DETAIL_DOMAIN = "www.ggzy.gov.cn"

        parsed = urlparse(url)
        if not parsed.netloc:
            # Relative link: resolve against the spider's base URL first.
            parsed = urlparse(urljoin(self.base_url, url))

        path = parsed.path
        marker = path.find('/html/b/')
        if marker == -1:
            # Not a sub-detail link at all.
            return None

        # Ensure '/information' sits immediately before the '/html/b/'
        # segment, whatever prefix (if any) the path came with.
        if marker == 0:
            path = '/information' + path
        elif not path[:marker].endswith('/information'):
            path = path[:marker] + '/information' + path[marker:]

        # Rebuild on the canonical domain; query/fragment are discarded.
        return f"https://{SUB_DETAIL_DOMAIN}{path}"
    
    def _extract_sub_detail_urls(self, soup: BeautifulSoup) -> List[str]:
        """Extract sub-detail page links (only ``/html/b/`` style links).

        Candidates are gathered from anchor hrefs, onclick/data-* attributes,
        inline scripts and, as a last resort, the raw HTML text.  Every
        candidate is canonicalized via _normalize_sub_detail_url and
        de-duplicated while preserving discovery order.
        """
        urls: List[str] = []
        seen_urls = set()  # canonical URLs already collected

        def collect(candidate: str) -> None:
            # Normalize, then append at most once per canonical URL.
            # (Previously this normalize/dedupe/append sequence was
            # copy-pasted at every extraction site.)
            normalized = self._normalize_sub_detail_url(candidate)
            if normalized and normalized not in seen_urls:
                seen_urls.add(normalized)
                urls.append(normalized)

        # Method 1: href attributes of anchors.  Note '/html/b/' also matches
        # '/information/html/b/', so a single substring test suffices.
        for link in soup.find_all('a', href=True):
            href = link.get('href', '').strip()
            if href and '/html/b/' in href:
                collect(href)

        # Method 2: onclick / data-url / data-href attributes on any element.
        onclick_patterns = [
            r'["\']([^"\']*?/html/b/[^"\']*?)["\']',  # relative or absolute path
            r'["\'](https?://[^"\']+/html/b/[^"\']*)["\']',  # absolute URL
        ]
        for element in soup.find_all(True):
            onclick = element.get('onclick', '')
            if onclick and '/html/b/' in onclick:
                for pattern in onclick_patterns:
                    for match in re.findall(pattern, onclick):
                        collect(match)
            for attr in ('data-url', 'data-href'):
                value = element.get(attr, '')
                if value and '/html/b/' in value:
                    collect(value)

        # Method 3: inline JavaScript (relative and absolute forms).
        for script in soup.find_all('script'):
            if not script.string:
                continue
            script_text = script.string

            # Absolute URLs first.
            absolute_pattern = r'["\'](https?://[^"\']+/html/b/[^"\']*)["\']'
            for match in re.findall(absolute_pattern, script_text):
                collect(match)

            # Then relative paths; absolute URLs were handled above.
            relative_patterns = [
                r'["\'](/[^"\']*?/html/b/[^"\']*?)["\']',
                r'["\']([^"\']*?/html/b/[^"\']*?)["\']',
            ]
            for pattern in relative_patterns:
                for match in re.findall(pattern, script_text):
                    if match.startswith(('http://', 'https://')):
                        continue
                    collect(match)

        # Method 4: scan the serialized HTML as a last-resort fallback.
        comprehensive_pattern = (
            r'(https?://[^\s<>"\']+/(?:information/)?html/b/[^\s<>"\']*'
            r'|/[^\s<>"\']*?/(?:information/)?html/b/[^\s<>"\']*)'
        )
        for match in re.findall(comprehensive_pattern, str(soup)):
            collect(match)

        logger.info(f"Extracted {len(urls)} sub detail URLs (format: /html/b/)")
        if urls:
            logger.debug(f"Sub detail URLs: {urls[:5]}...")  # first 5 only
        return urls
    
    def _normalize_url_for_matching(self, url: str) -> str:
        """Canonicalize *url* for equality comparison.

        Strips params, query string and fragment (keeping scheme, netloc and
        path) and removes any trailing slash.  On failure the URL is returned
        unchanged.
        """
        try:
            parts = urlparse(url)
            # Keep only scheme://netloc/path.
            bare = urlunparse((parts.scheme, parts.netloc, parts.path, '', '', ''))
            return bare.rstrip('/')
        except Exception as e:
            logger.warning(f"Failed to normalize URL {url}: {e}")
            return url
    
    def _find_list_item_by_url(self, url: str) -> Optional[ListItem]:
        """Find the ListItem associated with *url*, trying several strategies.

        Order: exact URL match, normalized-URL match, full-table normalized
        comparison, then the detail task's parent-task relationship.

        Uses ``filter(...).first()`` rather than ``get(...)`` so duplicate
        URLs in the table return the first row instead of raising
        MultipleObjectsReturned (which the old DoesNotExist-only handling
        did not catch).

        Args:
            url: Detail page URL.

        Returns:
            The matching list item, or None when nothing matches.
        """
        # Strategy 1: exact match.
        list_item = ListItem.objects.filter(url=url).first()
        if list_item:
            return list_item

        # Strategy 2: match after stripping query/fragment.
        normalized_url = self._normalize_url_for_matching(url)
        list_item = ListItem.objects.filter(url=normalized_url).first()
        if list_item:
            return list_item

        # Strategy 3: brute-force comparison of normalized URLs.
        # NOTE(review): scans the whole table — acceptable as a fallback, but
        # consider a normalized-URL column/index if this path becomes hot.
        for item in ListItem.objects.all():
            if self._normalize_url_for_matching(item.url) == normalized_url:
                return item

        # Strategy 4: follow the detail task's parent (list-page) task.
        task = CrawlTask.objects.filter(url=url, task_type='detail').first()
        if task and task.parent_task:
            candidate = ListItem.objects.filter(task=task.parent_task).first()
            if candidate:
                # Accept only when the URLs truly match after normalization.
                if self._normalize_url_for_matching(candidate.url) == normalized_url:
                    return candidate

        return None
    
    def _create_sub_detail_tasks(self, sub_detail_urls: List[str], task: CrawlTask) -> None:
        """Create pending 'sub_detail' crawl tasks for each new URL.

        Skips URLs that already have a sub_detail task (any status) or that
        the incremental-crawl policy rejects; *task* becomes the parent task.
        """
        created_count = 0
        skipped_count = 0
        for sub_url in sub_detail_urls:
            # Skip when a task for this URL already exists, whatever its state.
            if CrawlTask.objects.filter(url=sub_url, task_type='sub_detail').exists():
                skipped_count += 1
                logger.debug(f"Skipped sub detail URL (task already exists): {sub_url}")
                continue
            # Honour the incremental-crawl policy.
            if self.storage_manager.should_crawl_url(sub_url, 'sub_detail'):
                self.storage_manager.get_or_create_task(
                    sub_url,
                    'sub_detail',
                    'pending',
                    parent_task=task
                )
                created_count += 1
                logger.debug(f"Created sub detail task: {sub_url}")
            else:
                skipped_count += 1
                logger.debug(f"Skipped sub detail URL (filtered by should_crawl_url): {sub_url}")
        logger.info(
            f"Sub detail tasks: {created_count} created, {skipped_count} skipped "
            f"(total: {len(sub_detail_urls)})"
        )

    def crawl_and_save(self, url: str, list_item: Optional[ListItem] = None) -> Optional[DetailItem]:
        """Crawl a detail page, persist it, and schedule sub-detail tasks.

        Args:
            url: Detail page URL.
            list_item: Associated list item if already known; otherwise it is
                looked up by URL.

        Returns:
            The saved (or pre-existing) DetailItem, or None on failure or
            when no associated list item can be found.
            (Annotation fixed: the method returns a DetailItem, not a dict.)
        """
        if not list_item:
            list_item = self._find_list_item_by_url(url)
            if not list_item:
                logger.warning(f"List item not found for URL: {url}")
                return None
            logger.info(f"Found associated list item: {list_item.title} (ID: {list_item.id})")

        # The list item's list-page task (if any) becomes the parent task.
        parent_task = list_item.task if list_item else None
        task = self.storage_manager.get_or_create_task(
            url,
            'detail',
            'crawling',
            parent_task=parent_task
        )

        try:
            response = self.fetch_with_retry(url)
            if not response:
                self.storage_manager.update_task_status(task, 'failed', 'Failed to fetch HTML')
                return None
            html_text = response.text
            # Persist the raw HTML once, up front.  (The old code fetched the
            # page a second time and re-saved the HTML before saving a new
            # detail item — a redundant network round-trip, now removed.)
            self.storage_manager.save_raw_html(url, html_text, task)

            # Parse, with one explicit retry through the lxml parser.
            data = None
            try:
                data = self.parse_html(html_text)
                if not data:
                    soup_lxml = BeautifulSoup(html_text, 'lxml')
                    pn = self._extract_project_number(soup_lxml)
                    if pn:
                        data = {
                            'project_number': pn,
                            'info_source': self._extract_info_source(soup_lxml),
                            'sub_detail_urls': self._extract_sub_detail_urls(soup_lxml),
                        }
            except Exception:
                data = None

            # Still no project number: synthesize a stable one from the URL
            # so the page is not silently dropped.
            if not data:
                soup = BeautifulSoup(html_text, 'html.parser')
                normalized_url = DetailItem._normalize_url(url)
                url_hash = hashlib.md5(normalized_url.encode('utf-8')).hexdigest()[:12].upper()
                data = {
                    'project_number': f"AUTO-{url_hash}",
                    'info_source': self._extract_info_source(soup),
                    'sub_detail_urls': self._extract_sub_detail_urls(soup),
                }

            # De-duplicate on project number; an existing item still gets its
            # sub-detail tasks (re)checked below.
            detail_item = DetailItem.objects.filter(project_number=data['project_number']).first()
            if detail_item:
                logger.info(f"Detail item already exists: {data['project_number']}")
            else:
                detail_item = self.storage_manager.save_detail_item(data, list_item, task)

            # Schedule sub-detail crawls with this detail task as parent.
            sub_detail_urls = data.get('sub_detail_urls', [])
            logger.info(f"Found {len(sub_detail_urls)} sub detail URLs for project {data.get('project_number', 'unknown')}")
            if sub_detail_urls:
                self._create_sub_detail_tasks(sub_detail_urls, task)
            else:
                logger.warning(f"No sub detail URLs found in detail page: {url}")

            self.storage_manager.update_task_status(task, 'crawled')
            logger.info(f"Saved detail item: {detail_item.project_number}")
            return detail_item

        except Exception as e:
            error_msg = str(e)
            logger.error(f"Failed to crawl detail page {url}: {error_msg}")
            self.storage_manager.update_task_status(task, 'failed', error_msg)
            return None

