import asyncio
import json
from datetime import datetime
from typing import Any, Dict, List, Optional

from sqlalchemy import and_, delete, func, select, update
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload

from app.core.logging import get_logger
from app.models.crawler_job import CrawlerJob, CrawledPage, ScheduledCrawlerTask, CrawlerJobStatus
from app.schemas.crawler import (
    CrawlerJobCreate, CrawlerJobUpdate, CrawlerConfigBase,
    ScheduledCrawlerTaskCreate, ScheduledCrawlerTaskUpdate,
    CrawlerExecuteRequest
)
from app.tools.crawler import crawler_tool
from app.tools.optimized_crawler import optimized_crawler_tool

# Module-level logger named after this module, via the project's logging helper.
logger = get_logger(__name__)


class CrawlerService:
    """Service layer for crawler jobs and scheduled crawler tasks.

    Wraps an :class:`AsyncSession` and provides CRUD plus execution helpers
    for one-off jobs (:class:`CrawlerJob`) and recurring tasks
    (:class:`ScheduledCrawlerTask`). Every query is filtered by ``user_id``
    so callers can only read or mutate their own records.
    """

    def __init__(self, db: AsyncSession):
        # The session is owned by the caller (e.g. a request-scoped
        # dependency); this service commits on it but never closes it.
        self.db = db

    # ------------------------------------------------------------------
    # One-off crawler jobs
    # ------------------------------------------------------------------

    async def create_job(self, job_data: CrawlerJobCreate, user_id: int) -> CrawlerJob:
        """Persist a new crawler job in PENDING state and return it."""
        # Pydantic config model -> plain dict for the JSON column.
        config_dict = job_data.config.model_dump() if job_data.config else {}

        job = CrawlerJob(
            user_id=user_id,
            name=job_data.name,
            description=job_data.description,
            url=str(job_data.url),  # HttpUrl -> str for the DB column
            config=config_dict,
            status=CrawlerJobStatus.PENDING
        )

        self.db.add(job)
        await self.db.commit()
        await self.db.refresh(job)

        logger.info(f"Created crawler job {job.id} for user {user_id}")
        return job

    async def get_job(self, job_id: int, user_id: int) -> Optional[CrawlerJob]:
        """Return the user's job with crawled pages eagerly loaded, or ``None``."""
        stmt = select(CrawlerJob).where(
            and_(CrawlerJob.id == job_id, CrawlerJob.user_id == user_id)
        ).options(selectinload(CrawlerJob.crawled_pages))

        result = await self.db.execute(stmt)
        return result.scalar_one_or_none()

    async def get_jobs(
        self,
        user_id: int,
        skip: int = 0,
        limit: int = 100,
        status: Optional[CrawlerJobStatus] = None
    ) -> tuple[List[CrawlerJob], int]:
        """Return one page of the user's jobs plus the total match count.

        Args:
            user_id: Owner whose jobs are listed.
            skip: Offset into the result set.
            limit: Maximum number of rows returned.
            status: Optional status filter.
        """
        conditions = [CrawlerJob.user_id == user_id]
        if status is not None:
            conditions.append(CrawlerJob.status == status)

        # Count in the database; previously all matching rows were fetched
        # just to call len() on them.
        count_stmt = select(func.count()).select_from(CrawlerJob).where(and_(*conditions))
        total = (await self.db.execute(count_stmt)).scalar_one()

        # Deterministic ordering keeps offset/limit pagination stable.
        stmt = (
            select(CrawlerJob)
            .where(and_(*conditions))
            .order_by(CrawlerJob.id)
            .offset(skip)
            .limit(limit)
        )
        result = await self.db.execute(stmt)
        jobs = result.scalars().all()

        return list(jobs), total

    async def update_job(self, job_id: int, user_id: int, job_data: CrawlerJobUpdate) -> Optional[CrawlerJob]:
        """Apply the non-``None`` fields of ``job_data`` to the user's job.

        Returns the refreshed job, or ``None`` if it does not exist or is
        not owned by ``user_id``.
        """
        job = await self.get_job(job_id, user_id)
        if not job:
            return None

        # Partial update: only fields explicitly provided are changed.
        if job_data.name is not None:
            job.name = job_data.name  # type: ignore
        if job_data.description is not None:
            job.description = job_data.description  # type: ignore
        if job_data.config is not None:
            job.config = job_data.config.model_dump()  # type: ignore

        # NOTE: naive UTC timestamp, matching the rest of this service.
        job.updated_at = datetime.utcnow()  # type: ignore

        await self.db.commit()
        await self.db.refresh(job)

        logger.info(f"Updated crawler job {job_id}")
        return job

    async def delete_job(self, job_id: int, user_id: int) -> bool:
        """Delete the user's job; return ``True`` if it existed."""
        job = await self.get_job(job_id, user_id)
        if not job:
            return False

        await self.db.delete(job)
        await self.db.commit()

        logger.info(f"Deleted crawler job {job_id}")
        return True

    async def execute_job(self, job_id: int, user_id: int) -> Dict[str, Any]:
        """Run a stored job now and persist the crawled pages.

        Transitions the job to RUNNING, crawls according to its stored
        config, then marks it COMPLETED (with summary stats) or FAILED
        (with the error message). Returns a dict with ``success`` plus
        either counters or an ``error`` key.
        """
        job = await self.get_job(job_id, user_id)
        if not job:
            return {"success": False, "error": "Job not found"}

        if job.status == CrawlerJobStatus.RUNNING:  # type: ignore
            return {"success": False, "error": "Job is already running"}

        # Mark as running before the (potentially long) crawl so other
        # callers observe the state change immediately.
        job.status = CrawlerJobStatus.RUNNING  # type: ignore
        job.started_at = datetime.utcnow()  # type: ignore
        await self.db.commit()

        try:
            # Rehydrate the stored JSON config into the pydantic model.
            config_dict = job.config if job.config else {}  # type: ignore
            config = CrawlerConfigBase(**config_dict) if config_dict else CrawlerConfigBase()  # type: ignore

            async with optimized_crawler_tool as crawler:
                if config.max_depth and config.max_depth > 1:
                    # Depth crawl: follow links up to max_depth / max_pages.
                    results = await crawler.crawl_with_depth(
                        str(job.url),  # type: ignore
                        config,
                        max_depth=config.max_depth,
                        max_pages=config.max_pages or 10
                    )
                else:
                    # Single-page crawl.
                    result = await crawler.crawl_single_page(str(job.url), config)  # type: ignore
                    results = [result]

            pages_crawled, total_content_size = self._store_pages(job.id, results)

            # Record completion stats on the job row.
            job.status = CrawlerJobStatus.COMPLETED  # type: ignore
            job.completed_at = datetime.utcnow()  # type: ignore
            job.pages_crawled = pages_crawled  # type: ignore
            job.total_content_size = total_content_size  # type: ignore
            job.result_data = {  # type: ignore
                "summary": {
                    "pages_crawled": pages_crawled,
                    "total_content_size": total_content_size,
                    "execution_time": (job.completed_at - job.started_at).total_seconds()
                },
                "performance_metrics": optimized_crawler_tool.get_performance_metrics()
            }

            await self.db.commit()

            logger.info(f"Successfully executed crawler job {job_id}, crawled {pages_crawled} pages")
            return {
                "success": True,
                "pages_crawled": pages_crawled,
                "total_content_size": total_content_size
            }

        except Exception as e:
            # Roll back any half-applied inserts (or a failed commit) so the
            # session is usable again before the FAILED status is committed;
            # without this, a DB error above would leave the session in a
            # pending-rollback state and the commit below would also fail.
            await self.db.rollback()
            job.status = CrawlerJobStatus.FAILED  # type: ignore
            job.completed_at = datetime.utcnow()  # type: ignore
            job.error_message = str(e)  # type: ignore
            await self.db.commit()

            logger.error(f"Failed to execute crawler job {job_id}: {e}")
            return {"success": False, "error": str(e)}

    def _store_pages(self, job_id: Any, results: List[Dict[str, Any]]) -> tuple[int, int]:
        """Add a :class:`CrawledPage` row for each successful crawl result.

        Rows are added to the session but not committed here. Returns
        ``(pages_added, total_content_size_in_chars)``.
        """
        pages_crawled = 0
        total_content_size = 0

        for result in results:
            if not result.get("success", False):
                continue
            content = result.get("content", "")
            self.db.add(CrawledPage(
                job_id=job_id,
                url=result["url"],
                title=result.get("title", ""),
                content=content,
                status_code=result.get("status_code"),
                content_type=result.get("metadata", {}).get("content_type"),
                content_length=len(content),
                extracted_data=result.get("custom_data"),
                links=result.get("links", []),
                images=result.get("images", [])
            ))
            pages_crawled += 1
            total_content_size += len(content)

        return pages_crawled, total_content_size

    async def execute_immediate(self, request: CrawlerExecuteRequest) -> Dict[str, Any]:
        """Crawl a single URL right now without persisting anything.

        Returns the raw crawl result plus execution metrics; on error,
        returns ``success=False`` with the error message.
        """
        try:
            config = request.config or CrawlerConfigBase()

            async with optimized_crawler_tool as crawler:
                result = await crawler.crawl_single_page(str(request.url), config)

            return {
                "success": result.get("success", False),
                "data": result,
                "pages_crawled": 1 if result.get("success", False) else 0,
                "execution_time": result.get("execution_time", 0.0),
                "performance_metrics": optimized_crawler_tool.get_performance_metrics()
            }

        except Exception as e:
            logger.error(f"Failed to execute immediate crawl: {e}")
            return {
                "success": False,
                "error": str(e),
                "pages_crawled": 0,
                "execution_time": 0.0
            }

    # ------------------------------------------------------------------
    # Scheduled (cron-based) crawler tasks
    # ------------------------------------------------------------------

    async def create_scheduled_task(
        self,
        task_data: ScheduledCrawlerTaskCreate,
        user_id: int
    ) -> ScheduledCrawlerTask:
        """Persist a new scheduled crawler task and return it."""
        config_dict = task_data.config.model_dump() if task_data.config else {}

        task = ScheduledCrawlerTask(
            user_id=user_id,
            name=task_data.name,
            description=task_data.description,
            url=str(task_data.url),
            cron_expression=task_data.cron_expression,
            timezone=task_data.timezone,
            config=config_dict
        )

        self.db.add(task)
        await self.db.commit()
        await self.db.refresh(task)

        logger.info(f"Created scheduled crawler task {task.id} for user {user_id}")
        return task

    async def get_scheduled_task(self, task_id: int, user_id: int) -> Optional[ScheduledCrawlerTask]:
        """Return the user's scheduled task, or ``None`` if not found."""
        stmt = select(ScheduledCrawlerTask).where(
            and_(ScheduledCrawlerTask.id == task_id, ScheduledCrawlerTask.user_id == user_id)
        )
        result = await self.db.execute(stmt)
        return result.scalar_one_or_none()

    async def get_scheduled_tasks(
        self,
        user_id: int,
        skip: int = 0,
        limit: int = 100,
        is_active: Optional[bool] = None
    ) -> tuple[List[ScheduledCrawlerTask], int]:
        """Return one page of the user's scheduled tasks plus the total count.

        Args:
            user_id: Owner whose tasks are listed.
            skip: Offset into the result set.
            limit: Maximum number of rows returned.
            is_active: Optional active/inactive filter.
        """
        conditions = [ScheduledCrawlerTask.user_id == user_id]
        if is_active is not None:
            conditions.append(ScheduledCrawlerTask.is_active == is_active)

        # Count in the database rather than materialising every row.
        count_stmt = select(func.count()).select_from(ScheduledCrawlerTask).where(and_(*conditions))
        total = (await self.db.execute(count_stmt)).scalar_one()

        # Deterministic ordering keeps offset/limit pagination stable.
        stmt = (
            select(ScheduledCrawlerTask)
            .where(and_(*conditions))
            .order_by(ScheduledCrawlerTask.id)
            .offset(skip)
            .limit(limit)
        )
        result = await self.db.execute(stmt)
        tasks = result.scalars().all()

        return list(tasks), total

    async def update_scheduled_task(
        self,
        task_id: int,
        user_id: int,
        task_data: ScheduledCrawlerTaskUpdate
    ) -> Optional[ScheduledCrawlerTask]:
        """Apply the non-``None`` fields of ``task_data`` to the user's task.

        Returns the refreshed task, or ``None`` if it does not exist or is
        not owned by ``user_id``.
        """
        task = await self.get_scheduled_task(task_id, user_id)
        if not task:
            return None

        # Partial update: only fields explicitly provided are changed.
        if task_data.name is not None:
            task.name = task_data.name  # type: ignore
        if task_data.description is not None:
            task.description = task_data.description  # type: ignore
        if task_data.cron_expression is not None:
            task.cron_expression = task_data.cron_expression  # type: ignore
        if task_data.timezone is not None:
            task.timezone = task_data.timezone  # type: ignore
        if task_data.config is not None:
            task.config = task_data.config.model_dump()  # type: ignore
        if task_data.is_active is not None:
            task.is_active = task_data.is_active  # type: ignore

        task.updated_at = datetime.utcnow()  # type: ignore

        await self.db.commit()
        await self.db.refresh(task)

        logger.info(f"Updated scheduled crawler task {task_id}")
        return task

    async def delete_scheduled_task(self, task_id: int, user_id: int) -> bool:
        """Delete the user's scheduled task; return ``True`` if it existed."""
        task = await self.get_scheduled_task(task_id, user_id)
        if not task:
            return False

        await self.db.delete(task)
        await self.db.commit()

        logger.info(f"Deleted scheduled crawler task {task_id}")
        return True