"""
PicknBuy24爬虫控制器 - 最优并发版
"""
import asyncio
from pydantic import BaseModel, Field
from axiom_boot.api.controller import BaseController, post, get
from axiom_boot.api.models import ApiResponse, success_response
from axiom_boot.di import controller
from axiom_boot.logging.setup import get_logger
from axiom_boot.core.exceptions import BusinessException
from axiom_boot.task.task_manager import TaskManager
from axiom_boot.scraper import ScraperEngine, Target

from ...vehicle.service.vehicle_service import VehicleService
from ..extractors.picknbuy24_extractor.site_probe_extractor import SiteProbeExtractor
from ..services.page_status_service import PageStatusService
from ..services.scraper_task_service import ScraperTaskService
from ..services.picknbuy24_scraper_service import PicknBuy24ScraperService
from ..services.scraper_failed_record_service import ScraperFailedRecordService
from ..services.picknbuy24_incremental_service import PicknBuy24IncrementalService
from ..services.task_status_checker import TaskStatusChecker

logger = get_logger(__name__)


class PicknBuy24Request(BaseModel):
    """Request parameters for the PicknBuy24 vehicle scraper."""
    # Maximum number of vehicles to scrape; 0 means the whole site.
    max_count: int = Field(default=0, description="最大数量，0为全站", ge=0)

class ImageDownloadRequest(BaseModel):
    """Request parameters for image downloading."""
    pass  # Image download needs no extra parameters; it processes all vehicles in pending state.

class FailedRecordsRetryRequest(BaseModel):
    """Request parameters for retrying failed scrape records."""
    # Records at or above this retry count are skipped.
    max_retry_count: int = Field(default=3, description="最大重试次数", ge=1, le=10)
    # Site whose failed records should be retried.
    site_name: str = Field(default="picknbuy24", description="站点名称")

class IncrementalUpdateRequest(BaseModel):
    """Request parameters for incremental updates — none needed; everything is auto-probed."""
    pass


@controller("/api/picknbuy24", tags=["PicknBuy24爬虫"])
class PicknBuy24Controller(BaseController):
    """Dedicated PicknBuy24 scraper controller.

    Endpoints cover parallel vehicle scraping, concurrent image downloads,
    failed-record retries, incremental updates and task-status maintenance.
    All heavy work is delegated to the TaskManager queue; the handlers only
    split work into sub-tasks and keep bookkeeping records via the task
    service.
    """

    def __init__(self,
                 task_manager: TaskManager,
                 vehicle_service: VehicleService,
                 engine: ScraperEngine,
                 page_status_service: PageStatusService,
                 scraper_task_service: ScraperTaskService,
                 picknbuy24_scraper_service: PicknBuy24ScraperService,
                 failed_record_service: ScraperFailedRecordService,
                 incremental_service: PicknBuy24IncrementalService,
                 task_status_checker: TaskStatusChecker):
        self.task_manager = task_manager
        self.vehicle_service = vehicle_service
        self.engine = engine
        self.page_status_service = page_status_service
        self.scraper_task_service = scraper_task_service
        self.picknbuy24_scraper_service = picknbuy24_scraper_service
        self.failed_record_service = failed_record_service
        self.incremental_service = incremental_service
        self.task_status_checker = task_status_checker

    @post("/vehicles", summary="启动PicknBuy24车辆信息爬取")
    async def start_vehicle_scraping(self, request: PicknBuy24Request) -> ApiResponse:
        """Start a parallel PicknBuy24 vehicle scrape.

        Probes the site for the total page count, splits the page range into
        sub-tasks sized by data volume, and submits them all to the task
        queue at once so multiple workers can process different page ranges
        in parallel.

        Raises:
            BusinessException: if probing or task submission fails.
        """
        try:
            logger.info(f"启动PicknBuy24并行爬取 - 最大数量: {request.max_count if request.max_count > 0 else '全部'}")

            # 1. Probe the site to learn the total page count and page size.
            site_info = await self._probe_site_info()
            total_pages = site_info.total_pages
            per_page = site_info.per_page

            # 2. With a count limit, only scrape as many pages as needed.
            if request.max_count > 0:
                required_pages = (request.max_count + per_page - 1) // per_page
                total_pages = min(total_pages, required_pages)

            # 3. Pick the task granularity dynamically from the data volume.
            if total_pages <= 8:
                pages_per_task = 1  # small volume: one page per task maximizes parallelism
            elif total_pages <= 40:
                pages_per_task = max(1, total_pages // 8)  # medium volume: try to keep 8 workers busy
            else:
                pages_per_task = 5  # large volume: conservative split, 5 pages per task
            total_tasks = (total_pages + pages_per_task - 1) // pages_per_task

            logger.info(f"任务分割计划: 总页数 {total_pages}, 每任务 {pages_per_task} 页, 总任务数 {total_tasks}")

            # 4. Create the main task record used for aggregate statistics.
            # NOTE(review): create_task_record is not handed main_task_id here,
            # so the stored record may carry a different id than the one later
            # passed to start_task — confirm against the service API (the
            # image-download endpoint instead uses the returned id).
            main_task_id = f"picknbuy24_parallel_{self._generate_task_id()}"
            await self.scraper_task_service.create_task_record(
                task_name="PicknBuy24并行爬取",
                task_type="parallel_scraping",
                target_site="picknbuy24",
                target_count=request.max_count if request.max_count > 0 else total_pages * per_page,
                config_params={
                    "max_count": request.max_count,
                    "total_pages": total_pages,
                    "pages_per_task": pages_per_task,
                    "total_tasks": total_tasks,
                    "per_page": per_page
                }
            )

            # 5. Build every sub-task's page range up front ...
            task_params = []
            for task_num, start_page in enumerate(
                    range(1, total_pages + 1, pages_per_task), start=1):
                end_page = min(start_page + pages_per_task - 1, total_pages)
                task_params.append({
                    "start_page": start_page,
                    "end_page": end_page,
                    "sub_task_id": f"{main_task_id}_task_{task_num}",
                    "pages_count": end_page - start_page + 1
                })

            # ... then submit them all at once (true parallelism).
            submitted_tasks = []
            for params in task_params:
                job = await self.task_manager.submit_task(
                    "scrape_picknbuy24_vehicles",
                    start_page=params["start_page"],
                    end_page=params["end_page"],
                    task_id=params["sub_task_id"],
                    per_page=per_page
                )

                submitted_tasks.append({
                    "sub_task_id": params["sub_task_id"],
                    "start_page": params["start_page"],
                    "end_page": params["end_page"],
                    "pages_count": params["pages_count"],
                    # Some queue backends hand back a handle without a job_id yet.
                    "job_id": str(job.job_id) if hasattr(job, 'job_id') else "pending"
                })

            logger.info(f"已提交 {len(submitted_tasks)} 个并行任务到队列")

            # 6. Mark the main task as running.
            await self.scraper_task_service.start_task(main_task_id)

            return success_response(
                data={
                    "main_task_id": main_task_id,
                    "message": "PicknBuy24并行爬取任务已启动",
                    "parallel_config": {
                        "max_count": request.max_count,
                        "total_pages": total_pages,
                        "pages_per_task": pages_per_task,
                        "total_tasks": len(submitted_tasks),
                        "parallel_processing": True,
                        "workers_note": "多Worker将并行处理不同的页面范围"
                    },
                    "submitted_tasks": submitted_tasks
                },
                message="并行爬取任务启动成功"
            )

        except Exception as e:
            logger.error(f"PicknBuy24并行爬取任务提交失败: {e}")
            raise BusinessException(f"任务提交失败: {e}")

    @post("/images", summary="启动PicknBuy24图片下载")
    async def start_image_download(self, request: ImageDownloadRequest) -> ApiResponse:
        """Start concurrent PicknBuy24 image downloads with task tracking.

        Counts vehicles whose images are still pending, splits them into
        batches, and submits one queue task per batch.  The batch job ids
        are stored on the main task record so completion can be checked
        later via the status endpoints.

        Raises:
            BusinessException: if task submission fails.
        """
        try:
            import uuid  # hoisted out of the submission loop

            # How many vehicles still need their images downloaded?
            total_pending = await self.vehicle_service.count_pending_image_vehicles()

            if total_pending == 0:
                return success_response(data={
                    "message": "没有待下载图片的PicknBuy24车辆",
                    "total_pending": 0
                })

            # Create the main task record.
            main_task_id = await self.scraper_task_service.create_task_record(
                task_name="picknbuy24_image_download",
                task_type="image_download",
                target_site="picknbuy24",
                target_count=total_pending,
                concurrent_limit=0,  # image downloads are not concurrency-limited
                config_params={"batch_size": 50}
            )

            # Mark the main task as running.
            await self.scraper_task_service.start_task(main_task_id)

            # Batch sizing: 50 vehicles per task keeps memory pressure low
            # and improves stability.
            batch_size = 50
            total_tasks = (total_pending + batch_size - 1) // batch_size

            # Avoid worker starvation: with very few tasks, split the work
            # into more (smaller) batches so idle workers are not left
            # spinning while one worker holds the whole backlog.
            min_reasonable_tasks = 5
            if total_tasks < min_reasonable_tasks and total_pending >= min_reasonable_tasks:
                total_tasks = min(total_pending, min_reasonable_tasks)
                batch_size = (total_pending + total_tasks - 1) // total_tasks  # recompute batch size

            # Submit the concurrent batch tasks.
            tasks = []
            job_ids = []  # collected for later status checks
            current_offset = 0

            for task_num in range(total_tasks):
                sub_task_id = f"{main_task_id}_batch_{task_num + 1}"
                current_batch = min(batch_size, total_pending - current_offset)

                # Unique job id so ARQ's duplicate-job detection does not
                # silently drop re-submissions.
                unique_job_id = f"img_{main_task_id}_{task_num}_{uuid.uuid4().hex[:8]}"
                job_ids.append(unique_job_id)

                await self.task_manager.submit_task(
                    "download_picknbuy24_images",
                    start_offset=current_offset,
                    batch_size=current_batch,
                    task_id=sub_task_id,
                    main_task_id=main_task_id,
                    _job_id=unique_job_id
                )

                # Stagger submissions to avoid a burst of simultaneous
                # downloads hammering the network; skip after the last one.
                if task_num < total_tasks - 1:
                    await asyncio.sleep(2.0)

                tasks.append({
                    "task_id": sub_task_id,
                    "job_id": unique_job_id,
                    "start_offset": current_offset,
                    "batch_size": current_batch
                })

                current_offset += current_batch

                if current_offset >= total_pending:
                    break

            # Persist the job ids on the main task so the status checker can
            # find the sub-jobs.  NOTE(review): this replaces the whole
            # config_params dict rather than merging into it.
            task_record = await self.scraper_task_service.find_one_by_filters(task_id=main_task_id)
            if task_record:
                task_record.config_params = {"batch_size": batch_size, "sub_job_ids": job_ids}
                await self.scraper_task_service.save(task_record)

            logger.info(f"PicknBuy24图片下载任务提交完成: {len(tasks)}个并发任务，总计{total_pending}辆车，主任务ID: {main_task_id}")

            return success_response(data={
                "message": "PicknBuy24图片下载任务提交成功",
                "main_task_id": main_task_id,
                "total_tasks": len(tasks),
                "total_pending": total_pending,
                "batch_size": batch_size,
                "tasks": tasks
            })

        except Exception as e:
            logger.error(f"PicknBuy24图片下载任务提交失败: {e}")
            raise BusinessException(f"图片下载任务提交失败: {e}")

    @get("/stats", summary="获取PicknBuy24爬虫统计信息")
    async def get_stats(self) -> ApiResponse:
        """Return aggregated PicknBuy24 scraper statistics.

        Combines vehicle/image counters, per-site page progress, recent task
        records and the static batching configuration into one payload.

        Raises:
            BusinessException: if any of the underlying lookups fail.
        """
        try:
            # Vehicle / image counters.
            total_vehicles = await self.vehicle_service.count()
            pending_images = await self.vehicle_service.count_pending_image_vehicles()
            completed_images = total_vehicles - pending_images

            # Page-level progress for this site.
            page_progress = await self.page_status_service.get_site_progress("picknbuy24")

            # Most recent task records.
            task_stats = await self.scraper_task_service.list_recent_tasks(5)

            stats = {
                "vehicles": {
                    "total": total_vehicles,
                    "completed_images": completed_images,
                    "pending_images": pending_images,
                    # max(..., 1) guards against division by zero on an empty DB.
                    "completion_rate": f"{(completed_images / max(total_vehicles, 1) * 100):.1f}%"
                },
                "pages": page_progress,
                "tasks": task_stats,
                # Static defaults describing the batching configuration.
                "system": {
                    "pages_per_task": 10,
                    "vehicles_per_page": 100,
                    "vehicles_per_task": 1000,
                    "image_batch_size": 50
                }
            }

            return success_response(data=stats)

        except Exception as e:
            logger.error(f"获取PicknBuy24统计信息失败: {e}")
            raise BusinessException(f"获取统计信息失败: {e}")

    async def _probe_site_info(self):
        """Probe the PicknBuy24 site for total vehicle/page counts.

        Returns a SiteInfo-like result; falls back to a conservative default
        configuration (never raises) when probing fails or errors out.
        """
        try:
            # Hit the listing page with limit=100 so the probed per-page
            # count matches what the scrape tasks will actually use.
            target = Target(
                url="https://www.picknbuy24.com/usedcar/?sort=refno1&limit=100&page=1",
                extractor=SiteProbeExtractor()
            )

            results = await self.engine.scrape(target)

            if results:
                site_info = results[0]
                if site_info.success:
                    logger.info(f"网站探测成功: {site_info.total_vehicles}辆车，每页{site_info.per_page}辆")
                    return site_info

            # Probe returned nothing usable — use conservative defaults.
            logger.warning("网站探测失败，使用默认配置")
            return self._default_site_info()

        except Exception as e:
            logger.error(f"网站探测异常: {e}")
            return self._default_site_info()

    def _default_site_info(self):
        """Build the conservative fallback SiteInfo used when probing fails."""
        # Imported lazily, mirroring the original code's import placement.
        from ..extractors.picknbuy24_extractor.site_probe_extractor import SiteInfo
        return SiteInfo(
            total_vehicles=75000,  # conservative estimate, slightly above the likely real count
            total_pages=750,       # based on 100 vehicles per page
            per_page=100,
            current_page=1,
            success=True
        )

    @post("/failed-records/retry", summary="重试失败记录")
    async def retry_failed_records(self, request: FailedRecordsRetryRequest) -> ApiResponse:
        """Retry previously failed scrape records in parallel.

        Independent of the main scraping flow: loads pending failed records,
        filters out those past the retry limit, groups them by failure type,
        and submits batched retry tasks to the queue.

        Raises:
            BusinessException: if loading records or submitting tasks fails.
        """
        try:
            logger.info(f"启动失败记录重试任务 - 站点: {request.site_name}, 最大重试次数: {request.max_retry_count}")

            # 1. Load all failed records still waiting for a retry.
            pending_records = await self.failed_record_service.get_pending_retries(request.site_name)
            if not pending_records:
                return ApiResponse(
                    success=True,
                    message="没有待重试的失败记录",
                    data={"retry_count": 0}
                )

            # Drop records that already hit the retry limit.
            valid_records = [r for r in pending_records if r.retry_count < request.max_retry_count]
            logger.info(f"发现 {len(pending_records)} 条失败记录，{len(valid_records)} 条可重试")

            if not valid_records:
                return ApiResponse(
                    success=True,
                    message="没有可重试的记录（已达最大重试次数）",
                    data={"retry_count": 0}
                )

            # 2. Create the main task record.
            main_task_id = f"retry_{request.site_name}_{self._generate_task_id()}"
            await self.scraper_task_service.create_task_record(
                task_name="失败记录重试",
                task_type="failed_records_retry",
                target_site=request.site_name,
                config_params={
                    "max_retry_count": request.max_retry_count,
                    "site_name": request.site_name,
                    "total_records": len(valid_records)
                }
            )

            # 3. Parallelism: roughly one task per 50 records, clamped to 2..8.
            parallel_count = min(8, max(2, len(valid_records) // 50))

            # Group records by failure type so each retry task handles a
            # homogeneous batch.
            type_groups = {}
            for record in valid_records:
                failure_type = record.failure_type
                if failure_type not in type_groups:
                    type_groups[failure_type] = []
                type_groups[failure_type].append(record)

            task_futures = []
            task_counter = 1

            # 4. Split each failure-type group into parallel batch tasks.
            for failure_type, records in type_groups.items():
                chunk_size = max(10, len(records) // parallel_count)

                for i in range(0, len(records), chunk_size):
                    chunk = records[i:i + chunk_size]
                    if not chunk:
                        continue

                    task_id = f"{main_task_id}_task_{task_counter}"

                    # Submit one batched retry task per chunk.
                    task_future = await self.task_manager.submit_task(
                        "retry_picknbuy24_failed_records_batch",
                        task_id=task_id,
                        main_task_id=main_task_id,
                        failure_type=failure_type,
                        record_ids=[r.id for r in chunk],
                        site_name=request.site_name,
                        max_retry_count=request.max_retry_count
                    )
                    task_futures.append(task_future)
                    task_counter += 1

            logger.info(f"已启动 {len(task_futures)} 个并行重试任务，总计 {len(valid_records)} 条记录")

            return success_response(
                data={
                    # BUGFIX: previously returned the last loop's sub-task id;
                    # the main task id is what callers poll.
                    "task_id": main_task_id,
                    "message": "失败记录重试任务已启动",
                    "total_tasks": len(task_futures),
                    "total_records": len(valid_records),
                    "config": {
                        "site_name": request.site_name,
                        "max_retry_count": request.max_retry_count
                    }
                },
                message="失败记录重试任务启动成功"
            )

        except Exception as e:
            logger.error(f"启动失败记录重试任务失败: {e}")
            raise BusinessException(f"启动失败记录重试任务失败: {str(e)}")

    @get("/failed-records/stats", summary="获取失败记录统计")
    async def get_failed_records_stats(self, site_name: str = "picknbuy24") -> ApiResponse:
        """Return statistics on pending failed records, grouped by failure type.

        For each failure type: count, average/max retry counts, and up to
        five recent failure examples.

        Raises:
            BusinessException: if loading the records fails.
        """
        try:
            # BUGFIX: was self.picknbuy24_service.failed_record_service, but
            # no `picknbuy24_service` attribute exists on this controller —
            # the injected service is `failed_record_service`.
            pending_records = await self.failed_record_service.get_pending_retries(site_name)

            # Aggregate by failure type.
            stats = {}
            for record in pending_records:
                failure_type = record.failure_type
                if failure_type not in stats:
                    stats[failure_type] = {
                        "count": 0,
                        "avg_retry_count": 0,
                        "max_retry_count": 0,
                        "recent_failures": []
                    }

                stats[failure_type]["count"] += 1
                stats[failure_type]["avg_retry_count"] += record.retry_count
                stats[failure_type]["max_retry_count"] = max(stats[failure_type]["max_retry_count"], record.retry_count)

                # Keep up to five recent failure examples per type.
                if len(stats[failure_type]["recent_failures"]) < 5:
                    stats[failure_type]["recent_failures"].append({
                        "item_reference": record.item_reference,
                        "retry_count": record.retry_count,
                        "last_retry_time": record.last_retry_time.isoformat() if record.last_retry_time else None,
                        "error_message": record.error_message[:100] if record.error_message else None
                    })

            # Turn the accumulated sums into averages.
            for failure_type in stats:
                if stats[failure_type]["count"] > 0:
                    stats[failure_type]["avg_retry_count"] = stats[failure_type]["avg_retry_count"] / stats[failure_type]["count"]

            return success_response(
                data={
                    "site_name": site_name,
                    "total_failed_records": len(pending_records),
                    "failure_types": stats,
                    "summary": {
                        "total_count": len(pending_records),
                        "types_count": len(stats),
                        "needs_retry": len([r for r in pending_records if r.retry_count < 3])
                    }
                },
                message="失败记录统计获取成功"
            )

        except Exception as e:
            logger.error(f"获取失败记录统计失败: {e}")
            raise BusinessException(f"获取失败记录统计失败: {str(e)}")

    @post("/incremental", summary="启动PicknBuy24增量更新")
    async def start_incremental_update(self, request: IncrementalUpdateRequest) -> ApiResponse:
        """Start a PicknBuy24 incremental update — light scan + change detection.

        Behaviour:
        1. Lightly scans the list pages, fetching only basic info and price.
        2. Compares against existing database rows to detect changes.
        3. New vehicles trigger detail scraping and image downloads.
        4. Changed vehicles get price/status updated.
        5. Vehicles no longer listed are marked as sold.

        Refuses to start when an incremental update is already running.

        Raises:
            BusinessException: if probing or task submission fails.
        """
        try:
            # Auto-probe the site's total page count and per-page size.
            logger.info("步骤1: 开始网站探测")
            site_info = await self._probe_site_info()
            logger.info("步骤2: 网站探测完成")
            actual_max_pages = site_info.total_pages
            actual_per_page = site_info.per_page
            logger.info(f"自动探测到网站信息: {actual_max_pages}页，每页{actual_per_page}辆车")

            logger.info(f"开始增量更新: 扫描{actual_max_pages}页，每页{actual_per_page}辆车")

            # Guard against duplicate runs: bail out when an incremental
            # update task is already pending or running.
            logger.info("步骤3: 检查现有任务")
            recent_tasks = await self.scraper_task_service.list_recent_tasks(5)
            logger.info(f"步骤4: 获取到{len(recent_tasks)}个最近任务")
            running_incremental = [t for t in recent_tasks
                                 if t['task_type'] == 'incremental_update'
                                 and t['task_status'] in ['pending', 'running']]
            logger.info("步骤5: 任务检查完成")

            if running_incremental:
                logger.warning(f"检测到正在运行的增量更新任务: {[t['task_id'] for t in running_incremental]}")
                return success_response(
                    data={
                        "message": "已有增量更新任务正在运行，请等待完成后再启动新任务",
                        "running_tasks": [t['task_id'] for t in running_incremental]
                    },
                    message="增量更新任务已在运行中"
                )

            # Create the main task record.
            main_task_id = await self.scraper_task_service.create_task_record(
                task_name="picknbuy24_incremental_update",
                task_type="incremental_update",
                target_site="picknbuy24",
                target_count=actual_max_pages * actual_per_page,  # actual vehicles to scan
                concurrent_limit=1,  # incremental updates run as a single task
                config_params={
                    "max_pages": actual_max_pages,
                    "per_page": actual_per_page,
                    "auto_detected": True  # always auto-probed
                }
            )

            # Mark the main task as running.
            await self.scraper_task_service.start_task(main_task_id)

            # Derive the single sub-task id.
            sub_task_id = f"{main_task_id}_incremental"

            # Submit the incremental-update job.
            await self.task_manager.submit_task(
                "picknbuy24_incremental_update",
                max_pages=actual_max_pages,
                per_page=actual_per_page,
                task_id=sub_task_id,
                main_task_id=main_task_id
            )

            logger.info(f"增量更新任务提交成功: 主任务ID: {main_task_id}")

            return success_response(
                data={
                    "main_task_id": main_task_id,
                    "sub_task_id": sub_task_id,
                    "max_pages": actual_max_pages,
                    "per_page": actual_per_page,
                    "estimated_vehicles": actual_max_pages * actual_per_page,
                    "auto_detected": True
                },
                message="增量更新任务提交成功"
            )

        except Exception as e:
            logger.error(f"增量更新任务提交失败: {e}")
            raise BusinessException(f"增量更新任务提交失败: {e}")

    @post("/tasks/{main_task_id}/check-status", summary="检查主任务状态并更新")
    async def check_and_update_task_status(self, main_task_id: str) -> ApiResponse:
        """Check one main task's sub-tasks and mark it complete when they all stopped.

        Args:
            main_task_id: id of the main task to inspect.

        Raises:
            BusinessException: if the task is unknown or the check fails.
        """
        try:
            updated = await self.task_status_checker.update_main_task_if_completed(main_task_id)

            if updated:
                return success_response(
                    data={"main_task_id": main_task_id, "updated": True},
                    message="主任务状态已更新为完成"
                )
            else:
                # Not completed — report detailed sub-task status instead.
                task_record = await self.scraper_task_service.find_one_by_filters(task_id=main_task_id)
                if not task_record:
                    raise BusinessException(f"未找到主任务: {main_task_id}")

                job_ids = task_record.config_params.get("sub_job_ids", [])
                if job_ids:
                    status_result = await self.task_status_checker.check_subtasks_status(main_task_id, job_ids)
                    return success_response(
                        data={
                            "main_task_id": main_task_id,
                            "updated": False,
                            "task_status": task_record.task_status,
                            "subtask_status": status_result
                        },
                        message="主任务仍有子任务运行中"
                    )
                else:
                    return success_response(
                        data={
                            "main_task_id": main_task_id,
                            "updated": False,
                            "task_status": task_record.task_status,
                            "message": "无子任务记录"
                        },
                        message="主任务无子任务记录"
                    )

        except Exception as e:
            logger.error(f"检查任务状态失败: {e}")
            raise BusinessException(f"检查任务状态失败: {e}")

    @post("/tasks/check-all-running", summary="检查所有运行中的任务状态")
    async def check_all_running_tasks(self) -> ApiResponse:
        """Sweep all running main tasks and mark the completed ones.

        Raises:
            BusinessException: if the batch check fails.
        """
        try:
            result = await self.task_status_checker.check_and_update_all_running_tasks()

            return success_response(
                data=result,
                message=f"检查完成，更新了{result.get('updated_count', 0)}个任务"
            )

        except Exception as e:
            logger.error(f"批量检查任务状态失败: {e}")
            raise BusinessException(f"批量检查任务状态失败: {e}")

    def _generate_task_id(self) -> str:
        """Generate a task id from the current time in milliseconds."""
        import time
        return str(int(time.time() * 1000))