"""
列表页爬虫
从公共资源交易平台API接口获取项目信息
"""
import json
import logging
import time
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional

from spider.core.base_spider import BaseSpider
from spider.data.storage import StorageManager
from spider.models import CrawlTask
from spider.config.settings import (
    LIST_API_URL,
    BASE_URL,
    LIST_API_DEFAULT_PARAMS,
    INCREMENTAL_CRAWL,
    CRAWL_SINCE_DAYS,
    MAX_RETRIES,
    RETRY_DELAY,
    RETRY_BACKOFF,
)

logger = logging.getLogger(__name__)


class ListPageSpider(BaseSpider):
    """List-page spider.

    Fetches project listings from the public resource trading platform's
    JSON API endpoint (the site's list page is rendered client-side, so we
    POST to the API directly instead of parsing HTML).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.api_url = LIST_API_URL
        self.base_url = BASE_URL

    def fetch_list_page(
        self,
        page_number: int = 1,
        time_begin: Optional[str] = None,
        time_end: Optional[str] = None,
        **kwargs
    ) -> Optional[Dict[str, Any]]:
        """
        Call the list API and return the decoded JSON payload.

        Args:
            page_number: 1-based page number.
            time_begin: Start date, format YYYY-MM-DD.
            time_end: End date, format YYYY-MM-DD.
            **kwargs: Extra API form parameters (override the defaults).

        Returns:
            The parsed JSON dict on success, None on any failure
            (network error, non-JSON body, or the API's own
            ``success=False`` flag).
        """
        # Start from the configured defaults; copy so the module-level
        # settings dict is never mutated.
        params = LIST_API_DEFAULT_PARAMS.copy()
        params['PAGENUMBER'] = str(page_number)

        # The endpoint expects both the real and the "_SHOW" time fields.
        if time_begin:
            params['TIMEBEGIN'] = time_begin
            params['TIMEBEGIN_SHOW'] = time_begin
        if time_end:
            params['TIMEEND'] = time_end
            params['TIMEEND_SHOW'] = time_end

        # Caller-supplied parameters take precedence over everything above.
        params.update(kwargs)

        # Headers the endpoint requires in order to treat this as an
        # in-page AJAX request (otherwise it returns an HTML error page).
        headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': f'{self.base_url}/ds/deal/dealList.jsp',
            'Origin': self.base_url,
        }

        # POST using the base class's retry-aware fetch.
        response = self.fetch_with_retry(
            self.api_url,
            method='POST',
            data=params,
            headers=headers,
        )
        if response is None:
            return None

        try:
            data = response.json()

            # The API signals logical failure via a 'success' flag even
            # when the HTTP status is 200.
            if not data.get('success', False):
                logger.warning(f"API returned success=False: {data}")
                return None

            logger.info(
                f"Fetched page {page_number}: "
                f"{len(data.get('data', []))} items, "
                f"total pages: {data.get('ttlpage', 0)}, "
                f"total rows: {data.get('ttlrow', 0)}"
            )
            return data

        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse JSON response: {e}")
            logger.debug(f"Response text: {response.text[:500]}")
            return None
        except Exception as e:
            logger.error(f"Error processing API response: {e}")
            return None

    def parse_api_response(self, api_data: Dict[str, Any]) -> Optional[List[Dict[str, Any]]]:
        """
        Extract list items from an API payload.

        Args:
            api_data: JSON dict as returned by :meth:`fetch_list_page`.

        Returns:
            A list of parsed item dicts (possibly empty), or None on an
            unexpected parsing failure. Items that fail individually are
            logged and skipped rather than aborting the whole page.
        """
        try:
            items = []
            for raw_item in api_data.get('data', []):
                try:
                    parsed_item = self._parse_api_item(raw_item)
                    if parsed_item:
                        items.append(parsed_item)
                except Exception as e:
                    logger.warning(f"Failed to parse API item: {e}")
                    continue

            logger.info(f"Parsed {len(items)} items from API response")
            return items

        except Exception as e:
            logger.error(f"Failed to parse API response: {e}")
            return None

    def _parse_api_item(self, item: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """
        Normalize a single raw API item into our storage schema.

        Args:
            item: One element of the API payload's ``data`` array.

        Returns:
            A dict ready for ``save_list_item``, or None when the item is
            missing a required field (URL or title) or cannot be parsed.
        """
        try:
            # URL is the unique identifier downstream — required.
            url = item.get('url', '')
            if not url:
                logger.warning("Item missing URL field")
                return None

            # Some items carry the title in 'titleShow' instead of 'title'.
            title = item.get('title', '') or item.get('titleShow', '')
            if not title:
                logger.warning(f"Item missing title: {url}")
                return None

            # Publish time is optional; a bad format degrades to None.
            publish_time = None
            time_show = item.get('timeShow', '')
            if time_show:
                try:
                    # Expected date format: YYYY-MM-DD
                    publish_time = datetime.strptime(time_show, '%Y-%m-%d')
                except ValueError:
                    logger.warning(f"Invalid date format: {time_show}")

            return {
                'title': title,
                'url': url,
                'publish_time': publish_time,
                'province': item.get('districtShow', ''),
                'source_platform': item.get('platformName', ''),
                'business_type': item.get('classifyShow', ''),
                'info_type': item.get('stageShow', ''),
                'industry': item.get('tradeShow', ''),
                # Extra fields kept for downstream analysis.
                'classify': item.get('classify', ''),
                'stage_name': item.get('stageName', ''),
            }

        except Exception as e:
            logger.warning(f"Error parsing API item: {e}")
            return None

    def parse_html(self, html: str) -> Optional[List[Dict[str, Any]]]:
        """
        Deprecated HTML parser, kept only to satisfy the base-class interface.

        The list page is now fetched via the JSON API, so this always
        returns None.
        """
        logger.warning("parse_html called but list page uses API, not HTML parsing")
        return None

    def _resolve_time_range(
        self,
        time_begin: Optional[str],
        time_end: Optional[str],
        auto_time_range: bool,
    ) -> tuple:
        """
        Determine the effective (time_begin, time_end) crawl window.

        Precedence when no explicit range is given and auto_time_range is
        enabled:
          1. TIMEBEGIN/TIMEEND configured in settings.py defaults,
          2. incremental crawl from the newest finished list task,
          3. the last CRAWL_SINCE_DAYS days.
        """
        if not auto_time_range or time_begin or time_end:
            # Caller supplied (part of) a range, or auto mode is off.
            return time_begin, time_end

        if LIST_API_DEFAULT_PARAMS.get('TIMEBEGIN') and LIST_API_DEFAULT_PARAMS.get('TIMEEND'):
            time_begin = LIST_API_DEFAULT_PARAMS['TIMEBEGIN']
            time_end = LIST_API_DEFAULT_PARAMS['TIMEEND']
            logger.info(f"Using time range from settings.py: {time_begin} to {time_end}")
        elif INCREMENTAL_CRAWL:
            # Resume from the newest previously-crawled list task, minus one
            # day of overlap so items published near the boundary are not lost.
            task = CrawlTask.objects.filter(
                task_type='list',
                status='crawled'
            ).order_by('-crawled_at').first()

            if task and task.crawled_at:
                time_begin = (task.crawled_at - timedelta(days=1)).strftime('%Y-%m-%d')
                logger.info(f"Using incremental crawl from last crawl time: {time_begin}")
            else:
                # No prior crawl on record: fall back to the configured window.
                time_begin = (datetime.now() - timedelta(days=CRAWL_SINCE_DAYS)).strftime('%Y-%m-%d')
                logger.info(f"Using default crawl since days: {time_begin}")

            time_end = datetime.now().strftime('%Y-%m-%d')
        else:
            # Nothing configured at all: default to the recent window.
            time_begin = (datetime.now() - timedelta(days=CRAWL_SINCE_DAYS)).strftime('%Y-%m-%d')
            time_end = datetime.now().strftime('%Y-%m-%d')
            logger.info(f"Using default time range: {time_begin} to {time_end}")

        return time_begin, time_end

    def _fetch_page_with_retry(
        self,
        page_number: int,
        time_begin: Optional[str],
        time_end: Optional[str],
        **api_params
    ) -> Optional[Dict[str, Any]]:
        """
        Fetch one list page, retrying with exponential backoff.

        Returns the API payload dict, or None once all retries are exhausted.
        """
        for retry_attempt in range(MAX_RETRIES + 1):
            api_data = self.fetch_list_page(
                page_number=page_number,
                time_begin=time_begin,
                time_end=time_end,
                **api_params
            )
            if api_data:
                return api_data

            if retry_attempt < MAX_RETRIES:
                wait_time = RETRY_DELAY * (RETRY_BACKOFF ** retry_attempt)
                logger.warning(
                    f"Failed to fetch page {page_number}, "
                    f"retrying ({retry_attempt + 1}/{MAX_RETRIES}) "
                    f"after {wait_time:.2f} seconds..."
                )
                time.sleep(wait_time)

        logger.error(
            f"Failed to fetch page {page_number} after {MAX_RETRIES} retries, "
            f"skipping to next page"
        )
        return None

    def _save_items(self, items: List[Dict[str, Any]], task) -> int:
        """
        Persist parsed items and queue their detail-page crawl tasks.

        Items whose URL is filtered out by incremental-crawl dedup are
        skipped; individual save failures are logged and do not abort the
        batch. Returns the number of items saved.
        """
        saved = 0
        for item_data in items:
            try:
                # Incremental crawl: skip URLs we already have.
                if not self.storage_manager.should_crawl_url(item_data['url'], 'detail'):
                    continue

                self.storage_manager.save_list_item(item_data, task)
                saved += 1

                # Queue the detail-page crawl, parented to this list task.
                self.storage_manager.get_or_create_task(
                    item_data['url'],
                    'detail',
                    'pending',
                    parent_task=task
                )

            except Exception as e:
                logger.error(f"Failed to save list item: {e}")
                continue

        return saved

    def crawl_and_save(
        self,
        url: Optional[str] = None,
        page_number: int = 1,
        max_pages: Optional[int] = None,
        time_begin: Optional[str] = None,
        time_end: Optional[str] = None,
        auto_time_range: bool = True,
        **api_params
    ) -> int:
        """
        Crawl list pages via the API and persist the results.

        Args:
            url: Deprecated; kept only for interface compatibility.
            page_number: First page to crawl (1-based).
            max_pages: Maximum number of pages to crawl; None means all.
            time_begin: Start date, format YYYY-MM-DD.
            time_end: End date, format YYYY-MM-DD.
            auto_time_range: Compute a time range automatically when the
                caller did not supply one (see :meth:`_resolve_time_range`).
            **api_params: Extra API parameters forwarded to each request.

        Returns:
            Total number of list items saved (0 on fatal failure).
        """
        time_begin, time_end = self._resolve_time_range(
            time_begin, time_end, auto_time_range
        )

        # The API URL plus time range identifies this crawl task.
        task_url = f"{self.api_url}?time_begin={time_begin or ''}&time_end={time_end or ''}"
        task = self.storage_manager.get_or_create_task(task_url, 'list', 'crawling')

        try:
            total_saved = 0
            current_page = page_number
            # Total page count as reported by the most recent successful
            # response; 0 until we have seen at least one success.
            known_total_pages = 0

            while True:
                api_data = self._fetch_page_with_retry(
                    current_page, time_begin, time_end, **api_params
                )

                if not api_data:
                    # All retries failed: skip this page, but never loop blindly.
                    logger.warning(f"Skipping page {current_page} after all retries failed")
                    current_page += 1
                    # BUGFIX: the original used `>` here, which allowed one
                    # page beyond the max_pages budget.
                    if max_pages and current_page >= page_number + max_pages:
                        logger.warning("Reached max pages limit, stopping crawl")
                        break
                    # Stop once we are past the last page we know exists.
                    if known_total_pages and current_page > known_total_pages:
                        break
                    # BUGFIX: with max_pages=None and no successful response
                    # yet, the original looped forever; bail out instead.
                    if not max_pages and not known_total_pages:
                        logger.error("No page-count information available, stopping crawl")
                        break
                    continue

                known_total_pages = api_data.get('ttlpage', 0) or known_total_pages

                items = self.parse_api_response(api_data)
                if not items:
                    # Empty page (or parse failure) — treat as end of data.
                    logger.warning(f"No items found in page {current_page}")
                    break

                page_saved = self._save_items(items, task)
                total_saved += page_saved
                logger.info(f"Page {current_page}: saved {page_saved} items")

                # Stop at the last page reported by the API.
                total_pages = api_data.get('ttlpage', 0)
                if current_page >= total_pages:
                    logger.info(f"Reached last page: {current_page}/{total_pages}")
                    break

                # Stop once the max_pages budget is spent.
                if max_pages and current_page >= page_number + max_pages - 1:
                    logger.info(f"Reached max pages limit: {current_page}")
                    break

                current_page += 1

            self.storage_manager.update_task_status(task, 'crawled')
            logger.info(f"Total saved {total_saved} list items from API (time range: {time_begin} to {time_end})")

            return total_saved

        except Exception as e:
            error_msg = str(e)
            logger.error(f"Failed to crawl list page via API: {error_msg}")
            self.storage_manager.update_task_status(task, 'failed', error_msg)
            return 0

