"""
爬虫失败记录服务层
"""
import json
import traceback
from typing import List, Optional, Dict, Any
from datetime import datetime
from axiom_boot.core.id_generator import IdGenerator
from axiom_boot.di import autowired, service
from axiom_boot.database.base_service import BaseService
from axiom_boot.database import transactional
from axiom_boot.logging.setup import get_logger

from ..mapper.scraper_failed_record_mapper import ScraperFailedRecordMapper
from ..models.scraper_failed_record import ScraperFailedRecord, FailureType, FailureStage, RetryStatus
from ..models.failure_record_request import (
    PageExtractFailureRequest, DetailExtractFailureRequest,
    DataSaveFailureRequest, FileDownloadFailureRequest, RetryRequest,
    FailureQueryRequest, CleanupRequest
)

logger = get_logger(__name__)


@service()
class ScraperFailedRecordService(BaseService[ScraperFailedRecord, ScraperFailedRecordMapper]):
    """Service layer for scraper failure records.

    Persists failure details (error message, code, stack trace, retry
    bookkeeping) via ScraperFailedRecordMapper and drives the retry
    lifecycle: PENDING -> RETRYING -> SUCCESS / FAILED.
    """

    def __init__(self,
                 mapper: ScraperFailedRecordMapper = autowired(),
                 id_generator: IdGenerator = autowired()):
        super().__init__(mapper)
        self.id_generator = id_generator

    @transactional
    async def record_failure(self,
                           site_name: str,
                           error: Exception,
                           failure_type: str,
                           failure_stage: str,
                           page_number: Optional[int] = None,
                           item_reference: Optional[str] = None,
                           target_url: Optional[str] = None,
                           failure_data: Optional[Dict[str, Any]] = None,
                           task_id: Optional[str] = None,
                           worker_id: Optional[str] = None,
                           max_retry: int = 3) -> ScraperFailedRecord:
        """Record a failure, deduplicating by (site_name, item_reference).

        If a record for the same item is still PENDING/RETRYING its retry
        counter is bumped instead of inserting a duplicate; otherwise a new
        record in PENDING state is created.

        Returns the existing (updated) or newly created record.
        """
        # Deduplicate only when an item reference is available; page-level
        # failures have no stable identity and always create a new record.
        existing_record = None
        if item_reference:
            existing_record = await self.find_by_item_reference(site_name, item_reference)

        if existing_record and existing_record.retry_status in [RetryStatus.PENDING, RetryStatus.RETRYING]:
            # Bump the retry counter and refresh the stored error message.
            # BUGFIX: keep the in-memory object in sync with the DB update
            # so callers see the incremented count on the returned record.
            new_count = existing_record.retry_count + 1
            await self.update_retry_status(
                existing_record.id,
                RetryStatus.PENDING,
                new_count,
                datetime.now(),
                str(error)
            )
            existing_record.retry_count = new_count
            return existing_record

        # BUGFIX: traceback.format_exc() formats the exception *currently
        # being handled*, which is wrong (or "NoneType: None") whenever this
        # method is called outside an except block. Format the traceback
        # attached to the passed-in error instead.
        stack_trace = "".join(
            traceback.format_exception(type(error), error, error.__traceback__)
        )

        record = ScraperFailedRecord(
            id=await self.id_generator.generate_id(),
            site_name=site_name,
            failure_type=failure_type,
            failure_stage=failure_stage,
            page_number=page_number,
            item_reference=item_reference,
            target_url=target_url,
            error_message=str(error),
            # Prefer an explicit `code` attribute when the exception carries
            # one; fall back to the exception class name.
            error_code=getattr(error, 'code', None) or error.__class__.__name__,
            stack_trace=stack_trace,
            retry_count=0,
            max_retry=max_retry,
            retry_status=RetryStatus.PENDING,
            task_id=task_id,
            worker_id=worker_id,
            failure_data=failure_data,
            create_time=datetime.now(),
            update_time=datetime.now()
        )

        await self.save(record)
        logger.info(f"记录失败信息: {site_name} - {failure_type} - {item_reference or page_number}")
        return record

    async def record_page_extract_failure(self, request: PageExtractFailureRequest) -> ScraperFailedRecord:
        """Record a list-page extraction failure."""
        return await self.record_failure(
            site_name=request.site_name,
            error=request.error,
            failure_type=FailureType.PAGE_EXTRACT,
            failure_stage=FailureStage.LIST_PAGE,
            page_number=request.page_number,
            target_url=request.target_url,
            task_id=request.task_id,
            worker_id=request.worker_id,
            max_retry=request.max_retry
        )

    async def record_detail_extract_failure(self, request: DetailExtractFailureRequest) -> ScraperFailedRecord:
        """Record a detail-page extraction failure."""
        return await self.record_failure(
            site_name=request.site_name,
            error=request.error,
            failure_type=FailureType.DETAIL_EXTRACT,
            failure_stage=FailureStage.DETAIL_PAGE,
            item_reference=request.item_reference,
            target_url=request.target_url,
            failure_data=request.failure_data,
            task_id=request.task_id,
            worker_id=request.worker_id,
            max_retry=request.max_retry
        )

    async def record_data_save_failure(self, request: DataSaveFailureRequest) -> ScraperFailedRecord:
        """Record a database persistence failure."""
        return await self.record_failure(
            site_name=request.site_name,
            error=request.error,
            failure_type=FailureType.DATA_SAVE,
            failure_stage=FailureStage.DATABASE,
            item_reference=request.item_reference,
            failure_data=request.failure_data,
            task_id=request.task_id,
            worker_id=request.worker_id,
            max_retry=request.max_retry
        )

    async def record_file_download_failure(self, request: FileDownloadFailureRequest) -> ScraperFailedRecord:
        """Record a file download / storage failure."""
        return await self.record_failure(
            site_name=request.site_name,
            error=request.error,
            failure_type=FailureType.FILE_DOWNLOAD,
            failure_stage=FailureStage.STORAGE,
            item_reference=request.item_reference,
            target_url=request.target_url,
            task_id=request.task_id,
            worker_id=request.worker_id,
            max_retry=request.max_retry
        )

    @transactional
    async def mark_retry_success(self, request: RetryRequest) -> bool:
        """Mark a record's retry as successful."""
        result = await self.update_retry_status(
            request.record_id,
            RetryStatus.SUCCESS,
            last_retry_time=datetime.now()
        )
        logger.info(f"标记重试成功: {request.record_id}")
        return result

    @transactional
    async def mark_retry_failed(self, request: RetryRequest) -> bool:
        """Mark a record as permanently failed (retries exhausted)."""
        result = await self.update_retry_status(
            request.record_id,
            RetryStatus.FAILED,
            last_retry_time=datetime.now(),
            error_message=request.error_message
        )
        logger.info(f"标记重试最终失败: {request.record_id}")
        return result

    @transactional
    async def start_retry(self, request: RetryRequest) -> bool:
        """Transition a record to RETRYING and stamp the retry time."""
        result = await self.update_retry_status(
            request.record_id,
            RetryStatus.RETRYING,
            last_retry_time=datetime.now()
        )
        logger.info(f"开始重试: {request.record_id}")
        return result

    async def get_pending_retries(self, site_name: str) -> List[ScraperFailedRecord]:
        """Return records awaiting retry (max-retry not yet exceeded)."""
        return await self.find_pending_retries(site_name, max_retry_exceeded=False)

    async def get_failed_records(self, site_name: str) -> List[ScraperFailedRecord]:
        """Return records that have permanently failed."""
        return await self.find_by_site_and_status(site_name, RetryStatus.FAILED)

    async def get_task_failures(self, task_id: str) -> List[ScraperFailedRecord]:
        """Return all failure records belonging to a task."""
        return await self.find_by_task_id(task_id)

    async def get_failure_statistics(self, request: FailureQueryRequest) -> Dict[str, Any]:
        """Return aggregated failure statistics for a site and time window.

        BUGFIX: the original body awaited ``self.get_failure_statistics``,
        i.e. this very method with a different argument list — guaranteed
        infinite recursion / TypeError. Delegate to the mapper instead.
        NOTE(review): assumes BaseService exposes the injected mapper as
        ``self.mapper`` — confirm the attribute name.
        """
        return await self.mapper.get_failure_statistics(
            request.site_name,
            request.start_time,
            request.end_time
        )

    @transactional
    async def cleanup_old_records(self, request: CleanupRequest) -> int:
        """Delete failure records older than the retention window.

        Returns the number of rows deleted.

        BUGFIX: the original body awaited ``self.cleanup_old_records``,
        calling itself recursively with no base case. Delegate to the
        mapper instead.
        NOTE(review): assumes BaseService exposes the injected mapper as
        ``self.mapper`` — confirm the attribute name.
        """
        count = await self.mapper.cleanup_old_records(request.site_name, request.days_to_keep)
        logger.info(f"已清理 {count} 条过期失败记录: {request.site_name}")
        return count
