"""
PicknBuy24专用爬虫服务 - 纯业务编排器
职责: 编排PicknBuy24的完整爬虫流程，支持断点续传
"""
import asyncio
import re
import time
from typing import Any, Dict, List, Optional

from axiom_boot.di import service, autowired
from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper import ScraperEngine, Target

# Extractors
from ..extractors.picknbuy24_extractor.list_page_extractor import ListPageExtractor
from ..extractors.picknbuy24_extractor.vehicle_detail_extractor import VehicleDetailExtractor

# Core services
from ...vehicle.service.vehicle_service import VehicleService
from ...storage.service.storage_service import StorageService
from ...storage.service.file_relation_service import FileRelationService
from .scraper_task_service import ScraperTaskService
from .scraper_failed_record_service import ScraperFailedRecordService
from ..models.failure_record_request import (
    RetryRequest, PageExtractFailureRequest, DetailExtractFailureRequest, DataSaveFailureRequest
)

logger = get_logger(__name__)


@service()
class PicknBuy24ScraperService:
    """PicknBuy24-specific scraper service — pure business orchestrator.

    Orchestrates the full scraping workflow for picknbuy24.com: paginated
    listing crawl, concurrent detail extraction, persistence, sold-status
    detection, checkpointed resume, and retry of previously failed records.
    """

    # Compiled once: extracts the ref_no query parameter from a detail URL.
    _REF_NO_RE = re.compile(r'refno=([^&]+)')

    def __init__(self,
                 vehicle_service: VehicleService = autowired(),
                 storage_service: StorageService = autowired(),
                 file_relation_service: FileRelationService = autowired(),
                 task_service: ScraperTaskService = autowired(),
                 failed_record_service: ScraperFailedRecordService = autowired(),
                 scraper_engine: ScraperEngine = autowired()):
        self.vehicle_service = vehicle_service
        self.storage_service = storage_service
        self.file_relation_service = file_relation_service
        self.task_service = task_service
        self.failed_record_service = failed_record_service
        self.scraper_engine = scraper_engine

    async def execute_full_scraping(self,
                                   task_id: str,
                                   resume: bool = False,
                                   max_count: int = 0) -> Dict[str, Any]:
        """
        Run the complete PicknBuy24 scraping workflow, with checkpoint resume.

        Args:
            task_id: Identifier of the task record (created by the controller).
            resume: When True, continue from the last persisted checkpoint.
            max_count: Maximum number of vehicles to save; 0 means unlimited.

        Returns:
            Summary dict: task_id, status, total, success, failed, sold.

        Raises:
            Exception: If no task record exists for ``task_id``.
        """
        logger.info(f"【PicknBuy24Service】开始执行完整爬虫流程 - 任务: {task_id}, 断点续传: {resume}, 最大数量: {max_count if max_count > 0 else '全部'}")

        # 1. Load the task record; fall back to a fresh run if resume is impossible.
        if resume:
            task_record = await self.task_service.find_one_by_filters(task_id=task_id)
            if not task_record or task_record.task_status not in ['running', 'paused']:
                logger.warning(f"无法续传任务 {task_id}，创建新任务")
                resume = False

        if not resume:
            # The controller is expected to have created the task record already.
            task_record = await self.task_service.find_one_by_filters(task_id=task_id)
            if not task_record:
                raise Exception(f"任务记录不存在: {task_id}")

            # Seed the progress checkpoint on first run.
            if not task_record.config_params:
                task_record.config_params = {
                    "current_page": 1,
                    "total_pages": 0,  # unknown until probed
                    "per_page": 20,    # site serves a fixed 20 items per page
                    "last_processed_url": None,
                    "processed_refs": []
                }
                await self.task_service.save(task_record)

        # 2. Restore progress from the checkpoint.
        config = task_record.config_params or {}
        current_page = config.get("current_page", 1)
        total_pages = config.get("total_pages", 0)
        per_page = config.get("per_page", 20)
        processed_refs = set(config.get("processed_refs", []))

        total_processed = 0
        success_count = 0
        failed_count = 0
        # FIX: defined up-front so the final return never needs a locals() probe.
        sold_count = 0

        # 3. Mark the task as started.
        await self.task_service.start_task(task_id)

        # 3.0 Cache warm-up (full crawls only).
        if max_count == 0:
            logger.info("【PicknBuy24Service】开始缓存预热...")
            await self.vehicle_service._build_existing_vehicles_cache()
            logger.info("【PicknBuy24Service】缓存预热完成")

        # 3.1 Retry previously failed records first (simplified pass).
        retry_count = await self._process_simple_failed_records(task_id)
        if retry_count > 0:
            logger.info(f"【PicknBuy24Service】处理失败记录重试完成，成功重试 {retry_count} 条")
            success_count += retry_count

        try:
            # 4. Paginated crawl (honours max_count when > 0).
            while True:
                if max_count > 0 and success_count >= max_count:
                    logger.info(f"【PicknBuy24Service】已达到数量限制 {max_count}，停止爬取")
                    break

                logger.info(f"【PicknBuy24Service】开始爬取第 {current_page} 页")

                # 4.1 Collect vehicle detail URLs from the current listing page.
                try:
                    page_urls = await self._scrape_single_page(current_page, per_page, task_id)

                    # An empty page means we walked past the last one.
                    if not page_urls:
                        logger.info(f"【PicknBuy24Service】第 {current_page} 页无数据，爬取完成")
                        break
                except Exception as e:
                    logger.error(f"【PicknBuy24Service】第 {current_page} 页爬取失败: {e}")
                    await self.failed_record_service.record_page_extract_failure(
                        PageExtractFailureRequest(
                            site_name="picknbuy24",
                            error=e,
                            task_id=task_id,
                            page_number=current_page,
                            target_url=f"https://www.picknbuy24.com/usedcar/?sort=refno1&limit={per_page}&page={current_page}"
                        )
                    )
                    failed_count += 1
                    current_page += 1
                    continue

                # 4.2 Skip URLs whose ref_no was already processed.
                new_urls = [url for url in page_urls if self._extract_ref_from_url(url) not in processed_refs]

                # 4.2.1 Trim to the remaining quota when a limit is set.
                if max_count > 0:
                    remaining_count = max_count - success_count
                    if remaining_count <= 0:
                        logger.info(f"【PicknBuy24Service】已达到数量限制 {max_count}，停止爬取")
                        break
                    new_urls = new_urls[:remaining_count]

                logger.info(f"【PicknBuy24Service】第 {current_page} 页获取到 {len(page_urls)} 个URL，新增 {len(new_urls)} 个")

                # 4.3 Fetch vehicle detail pages (bounded concurrency inside).
                if new_urls:
                    logger.info(f"【PicknBuy24Service】开始爬取 {len(new_urls)} 个车辆详情")
                    page_results = await self._scrape_vehicle_details(new_urls, task_id)
                    logger.info(f"【PicknBuy24Service】车辆详情爬取完成，获得 {len(page_results) if page_results else 0} 条数据")

                    # 4.4 Persist vehicle data (basic info only, fast path).
                    # Not running concurrently at this point, so a transaction is safe.
                    if page_results:
                        logger.info(f"【PicknBuy24Service】开始保存 {len(page_results)} 条车辆数据（仅基本信息）")
                        try:
                            saved_count = await self.vehicle_service.batch_save_vehicles_only(page_results, source="picknbuy24", update_existing=True)
                            logger.info(f"【PicknBuy24Service】保存完成，成功保存 {saved_count} 条")
                            success_count += saved_count
                            total_processed += len(page_results)

                            # Remember every processed ref_no (dedup + sold detection).
                            for result in page_results:
                                ref_no = result.get('ref_no')
                                if ref_no:
                                    processed_refs.add(ref_no)
                        except Exception as e:
                            logger.error(f"【PicknBuy24Service】批量保存车辆数据失败: {e}")
                            # Record one data-save failure per vehicle for later retry.
                            # NOTE(review): the sibling failure kinds use request
                            # objects and DataSaveFailureRequest is imported but
                            # unused — confirm whether this call should take a
                            # DataSaveFailureRequest instead of keyword args.
                            for result in page_results:
                                ref_no = result.get('ref_no')
                                if ref_no:
                                    await self.failed_record_service.record_data_save_failure(
                                        site_name="picknbuy24",
                                        item_reference=ref_no,
                                        error=e,
                                        task_id=task_id,
                                        failure_data=result
                                    )
                            failed_count += len(page_results)
                            total_processed += len(page_results)
                    else:
                        logger.warning(f"【PicknBuy24Service】车辆详情爬取结果为空")

                # 4.5 Advance and persist the checkpoint, then report progress.
                current_page += 1
                config.update({
                    "current_page": current_page,
                    "total_pages": total_pages,
                    "processed_refs": list(processed_refs)
                })
                # FIX: the checkpoint used to be updated in memory only, so
                # resume=True could never actually continue from here.
                # NOTE(review): assumes save() persists config_params without
                # clobbering fields written by start_task/update_task_progress
                # — confirm against ScraperTaskService.
                task_record.config_params = config
                await self.task_service.save(task_record)
                await self.task_service.update_task_progress(
                    task_id, total_processed, success_count, failed_count
                )

                logger.info(f"【PicknBuy24Service】第 {current_page-1} 页处理完成，累计成功: {success_count}")

            # 5. Sold-status detection: refs not seen this crawl are marked sold.
            logger.info("【PicknBuy24Service】开始售出状态检测...")
            current_ref_nos = set(processed_refs)  # defensive copy for the service
            sold_count = await self.vehicle_service.mark_vehicles_as_sold(current_ref_nos, task_id)

            # 6. Mark the task as completed.
            final_stats = {
                "total_processed": total_processed,
                "success_count": success_count,
                "failed_count": failed_count,
                "sold_count": sold_count,
                "success_rate": (success_count / max(total_processed, 1)) * 100
            }
            await self.task_service.complete_task(task_id, final_stats)
            logger.info(f"【PicknBuy24Service】完整爬取任务完成，总成功: {success_count} 条，标记售出: {sold_count} 条")

        except Exception as e:
            logger.error(f"【PicknBuy24Service】爬取任务失败: {e}")
            # FIX: failed_count was previously recomputed as
            # total_processed - success_count AFTER the stats were built and
            # complete_task was called, so the persisted stats and the returned
            # dict disagreed (and the recompute could go negative when retries
            # inflate success_count). Use the accumulated counter consistently.
            final_stats = {
                "total_processed": total_processed,
                "success_count": success_count,
                "failed_count": failed_count,
                "success_rate": (success_count / max(total_processed, 1)) * 100 if total_processed > 0 else 0
            }
            await self.task_service.complete_task(task_id, final_stats, str(e))

        return {
            "task_id": task_id,
            "status": "completed" if success_count > 0 else "failed",
            "total": total_processed,
            "success": success_count,
            "failed": failed_count,
            "sold": sold_count
        }

    async def _scrape_single_page(self, page: int, per_page: int, task_id: str) -> List[str]:
        """Scrape one listing page and return the vehicle detail URLs found on it."""
        target = Target(
            url=f"https://www.picknbuy24.com/usedcar/?sort=refno1&limit={per_page}&page={page}",
            extractor=ListPageExtractor()
        )

        results = await self.scraper_engine.scrape(target)
        # ListPageExtractor yields VehicleUrlItem objects, each carrying a `url`.
        return [item.url for item in results if hasattr(item, 'url')]

    def _extract_ref_from_url(self, url: str) -> str:
        """Return the ref_no query parameter embedded in *url*, or "" when absent."""
        match = self._REF_NO_RE.search(url)
        return match.group(1) if match else ""

    async def _record_detail_failure(self, url: str, error: Exception, task_id: str) -> None:
        """Record a detail-extraction failure so it can be retried later."""
        await self.failed_record_service.record_detail_extract_failure(
            DetailExtractFailureRequest(
                site_name="picknbuy24",
                error=error,
                task_id=task_id,
                item_reference=self._extract_ref_from_url(url),
                target_url=url
            )
        )

    async def _scrape_vehicle_details(self, urls: List[str], task_id: str) -> List[Dict[str, Any]]:
        """Scrape vehicle detail pages with bounded internal concurrency.

        Failed or empty extractions are recorded via the failed-record service
        and excluded from the returned list.
        """
        vehicles_data: List[Dict[str, Any]] = []

        # Keep concurrency low (2-3) to avoid overloading the site.
        concurrent_limit = 2
        semaphore = asyncio.Semaphore(concurrent_limit)

        async def scrape_single_with_limit(url: str):
            async with semaphore:
                try:
                    vehicle_data = await self._scrape_vehicle_detail(url)
                    if vehicle_data:
                        logger.debug(f"【PicknBuy24Service】车辆详情爬取成功: {url}")
                        return vehicle_data
                    logger.warning(f"【PicknBuy24Service】车辆详情爬取无数据: {url}")
                    await self._record_detail_failure(url, Exception("车辆详情爬取无数据"), task_id)
                    return None
                except Exception as e:
                    logger.error(f"【PicknBuy24Service】车辆详情爬取失败: {url} - {e}")
                    await self._record_detail_failure(url, e, task_id)
                    return None

        if urls:
            logger.info(f"【PicknBuy24Service】开始并发爬取 {len(urls)} 个车辆详情，并发数: {concurrent_limit}")
            results = await asyncio.gather(*[scrape_single_with_limit(url) for url in urls], return_exceptions=True)

            # Keep only successful, non-exception results.
            for result in results:
                if result and not isinstance(result, Exception):
                    vehicles_data.append(result)

        logger.info(f"【PicknBuy24Service】并发爬取完成，成功 {len(vehicles_data)}/{len(urls)}")
        return vehicles_data

    async def _scrape_vehicle_detail(self, url: str) -> Optional[Dict[str, Any]]:
        """Scrape a single detail page; return its vehicle data, or None on failure."""
        target = Target(url=url, extractor=VehicleDetailExtractor(), metadata={"extract_images": False})
        results = await self.scraper_engine.scrape(target)

        for item in results:
            if hasattr(item, 'success') and item.success:
                return item.vehicle_data

        return None

    async def _process_simple_failed_records(self, task_id: str) -> int:
        """Retry pending data-save failures only (simplified pass); return success count."""
        try:
            # Fetch all pending retries, then keep only data-save failures.
            all_pending_records = await self.failed_record_service.get_pending_retries("picknbuy24")
            data_save_records = [r for r in all_pending_records if r.failure_type == "data_save"]

            if not data_save_records:
                return 0

            logger.info(f"【PicknBuy24Service】重试 {len(data_save_records)} 条数据保存失败记录")
            return await self._retry_data_save_failures(data_save_records, task_id)

        except Exception as e:
            # Best-effort: a failed retry pass must not abort the main crawl.
            logger.error(f"【PicknBuy24Service】处理失败记录重试失败: {e}")
            return 0

    async def _retry_detail_extract_failures(self, records: list, task_id: str) -> int:
        """Re-scrape and re-save records whose detail extraction failed; return success count."""
        success_count = 0

        for record in records:
            try:
                # Mark the record as being retried.
                await self.failed_record_service.start_retry(
                    RetryRequest(record_id=record.id)
                )

                # Re-extract the detail page.
                vehicle_data = await self._scrape_vehicle_detail(record.target_url)

                if vehicle_data:
                    try:
                        saved_count = await self.vehicle_service.batch_save_vehicles_only([vehicle_data], source="picknbuy24", update_existing=True)
                        if saved_count > 0:
                            await self.failed_record_service.mark_retry_success(
                                RetryRequest(record_id=record.id)
                            )
                            success_count += 1
                            logger.info(f"【PicknBuy24Service】重试成功: {record.item_reference}")
                        else:
                            await self.failed_record_service.mark_retry_failed(
                                RetryRequest(record_id=record.id, error_message="数据保存失败")
                            )
                    except Exception as save_error:
                        await self.failed_record_service.mark_retry_failed(
                            RetryRequest(record_id=record.id, error_message=f"保存失败: {save_error}")
                        )
                else:
                    # Bump the retry counter but keep the record pending.
                    await self.failed_record_service.update_retry_status(
                        record.id, "pending", record.retry_count + 1, None, "重试仍无数据"
                    )

            except Exception as e:
                logger.error(f"【PicknBuy24Service】重试详情提取失败: {record.item_reference} - {e}")
                await self.failed_record_service.mark_retry_failed(
                    RetryRequest(record_id=record.id, error_message=str(e))
                )

        return success_count

    async def _retry_data_save_failures(self, records: list, task_id: str) -> int:
        """Batch-retry saving vehicles whose previous save failed; return success count."""
        success_count = 0

        # Collect payloads carrying the original failure data for a batch retry.
        # Records without failure_data are silently skipped (nothing to replay).
        retry_data = []
        record_map = {}

        for record in records:
            if record.failure_data:
                retry_data.append(record.failure_data)
                record_map[record.failure_data.get('ref_no')] = record

        if retry_data:
            try:
                saved_count = await self.vehicle_service.batch_save_vehicles_only(retry_data, source="picknbuy24", update_existing=True)

                # NOTE(review): every submitted record is marked success once the
                # batch call returns, even if saved_count < len(retry_data) —
                # confirm batch_save_vehicles_only is all-or-nothing.
                for data in retry_data:
                    ref_no = data.get('ref_no')
                    if ref_no in record_map:
                        record = record_map[ref_no]
                        await self.failed_record_service.mark_retry_success(
                            RetryRequest(record_id=record.id)
                        )
                        success_count += 1
                        logger.info(f"【PicknBuy24Service】数据保存重试成功: {ref_no}")

            except Exception as e:
                logger.error(f"【PicknBuy24Service】批量数据保存重试失败: {e}")
                # The whole batch failed: mark every record as retry-failed.
                for record in records:
                    await self.failed_record_service.mark_retry_failed(
                        RetryRequest(record_id=record.id, error_message=str(e))
                    )

        return success_count

    async def execute_failed_records_only(self, task_id: str,
                                          site_name: str = "picknbuy24",
                                          max_retry_count: int = 3) -> Dict[str, Any]:
        """
        Standalone task that only retries previously failed records.

        Args:
            task_id: Identifier for this retry run.
            site_name: Site whose failure records are retried.
            max_retry_count: Records at or above this retry count are skipped.

        Returns:
            Retry statistics (totals, success/failed counts, skip count, rate).
        """
        logger.info(f"【PicknBuy24Service】开始执行失败记录重试任务 - 任务: {task_id}")

        # 1. Fetch all pending failure records for the site.
        pending_records = await self.failed_record_service.get_pending_retries(site_name)

        if not pending_records:
            logger.info(f"【PicknBuy24Service】无待重试的失败记录")
            return {
                "task_id": task_id,
                "status": "completed",
                "total_failed_records": 0,
                "retry_success": 0,
                "retry_failed": 0,
                "skip_max_retry": 0
            }

        # Drop records that already hit the retry ceiling.
        valid_records = [r for r in pending_records if r.retry_count < max_retry_count]
        skip_max_retry = len(pending_records) - len(valid_records)

        logger.info(f"【PicknBuy24Service】找到 {len(pending_records)} 条失败记录，{len(valid_records)} 条待重试，{skip_max_retry} 条已达最大重试次数")

        if not valid_records:
            return {
                "task_id": task_id,
                "status": "completed",
                "total_failed_records": len(pending_records),
                "retry_success": 0,
                "retry_failed": 0,
                "skip_max_retry": skip_max_retry
            }

        # 2. Group the records by failure type.
        type_groups: Dict[str, list] = {}
        for record in valid_records:
            type_groups.setdefault(record.failure_type, []).append(record)

        # 3. Counters.
        total_valid_records = len(valid_records)
        retry_success = 0
        retry_failed = 0

        # 4. Dispatch each group to its retry handler.
        for failure_type, records in type_groups.items():
            logger.info(f"【PicknBuy24Service】处理 {failure_type} 类型失败记录: {len(records)} 条")

            if failure_type == "detail_extract":
                success = await self._retry_detail_extract_failures(records, task_id)
            elif failure_type == "data_save":
                success = await self._retry_data_save_failures(records, task_id)
            elif failure_type == "file_download":
                # FIX: file_download results were previously added to the
                # counters both inside this branch AND in the shared tally
                # below, double-counting them. Tally once, below.
                success = await self._retry_file_download_failures(records, task_id)
            elif failure_type == "page_extract":
                # Page-extract failures need a redesigned flow; skip for now.
                logger.info(f"【PicknBuy24Service】跳过页面提取失败记录: {len(records)} 条")
                continue
            else:
                logger.warning(f"【PicknBuy24Service】未知失败类型: {failure_type}")
                continue

            retry_success += success
            retry_failed += (len(records) - success)

        # 5. Summarize.
        logger.info(f"【PicknBuy24Service】失败记录重试完成 - 总记录: {len(pending_records)}, 有效重试: {total_valid_records}, 成功: {retry_success}, 失败: {retry_failed}, 跳过: {skip_max_retry}")

        return {
            "task_id": task_id,
            "status": "completed",
            "total_failed_records": len(pending_records),
            "valid_retry_records": total_valid_records,
            "retry_success": retry_success,
            "retry_failed": retry_failed,
            "skip_max_retry": skip_max_retry,
            "success_rate": (retry_success / max(total_valid_records, 1)) * 100
        }

    async def _retry_file_download_failures(self, records: List, task_id: str) -> int:
        """Reset image status to pending for vehicles whose image download failed.

        The actual re-download is performed by the separate image-download task;
        this method only flips the vehicle back into its queue.
        """
        logger.info(f"【PicknBuy24Service】开始重试 {len(records)} 条图片下载失败记录")

        success_count = 0

        for record in records:
            try:
                # Mark the record as being retried.
                await self.failed_record_service.start_retry(
                    RetryRequest(record_id=record.id)
                )

                # item_reference is expected to hold the vehicle's ref_no.
                item_reference = record.item_reference
                if not item_reference:
                    # NOTE(review): the record stays in its post-start_retry state
                    # here (neither success nor failed) — confirm intended.
                    logger.warning(f"图片下载失败记录缺少item_reference: {record.id}")
                    continue

                vehicles = await self.vehicle_service.find_by_filters(ref_no=item_reference)
                if not vehicles:
                    logger.warning(f"未找到车辆 ref_no: {item_reference}")
                    continue

                vehicle = vehicles[0]

                # Flip image status back to pending for the download task.
                await self.vehicle_service.update_by_pk(
                    vehicle.id,
                    {'image_status': 'pending'}
                )

                await self.failed_record_service.mark_retry_success(
                    RetryRequest(record_id=record.id)
                )

                logger.info(f"成功重置车辆 {item_reference} 的图片状态为pending")
                success_count += 1

            except Exception as e:
                logger.error(f"重试图片下载失败: {record.item_reference} - {e}")

                await self.failed_record_service.mark_retry_failed(
                    RetryRequest(record_id=record.id, error_message=str(e))
                )

        logger.info(f"【PicknBuy24Service】图片下载重试完成: 成功 {success_count}/{len(records)}")
        return success_count