import httpx
import asyncio
import json
import logging
from typing import Dict, Any, Optional, List
from datetime import datetime

from app.config import settings
from app.models.schemas import TaskConfig, TaskStatus

logger = logging.getLogger(__name__)

class CrawlService:
    """Crawl service - talks to the Crawl4AI engine over its HTTP API.

    All network operations are best-effort: failures are logged and
    reported via ``False``/``None`` return values rather than raised,
    so callers never have to catch transport errors.
    """

    def __init__(self):
        self.api_url = settings.CRAWL4AI_API_URL
        self.api_key = settings.CRAWL4AI_API_KEY
        self.timeout = settings.CRAWL4AI_TIMEOUT
        # Local registry of tasks believed to be running on the engine.
        # NOTE(review): in-memory only — state is lost on process restart.
        self.running_tasks: Dict[str, bool] = {}

    def _auth_headers(self) -> Dict[str, str]:
        """Bearer-token Authorization header sent with every engine request."""
        return {"Authorization": f"Bearer {self.api_key}"}

    async def start_crawl(self, task_id: str, config: Dict[str, Any]) -> bool:
        """Start a crawl task on the Crawl4AI engine.

        Args:
            task_id: Caller-assigned identifier for the task.
            config: Raw task configuration; normalized by
                :meth:`_build_crawl_config` before being sent.

        Returns:
            True if the engine accepted the task, False otherwise.
        """
        try:
            # Optimistically mark the task as running; rolled back on any
            # failure below so the registry never holds phantom tasks.
            self.running_tasks[task_id] = True

            crawl_config = self._build_crawl_config(config)

            async with httpx.AsyncClient(timeout=self.timeout) as client:
                response = await client.post(
                    f"{self.api_url}/crawl/start",
                    json={
                        "task_id": task_id,
                        "config": crawl_config
                    },
                    headers=self._auth_headers()
                )

                if response.status_code == 200:
                    logger.info(f"Crawl task started successfully: {task_id}")
                    return True

                # Bug fix: previously a rejected start left the task flagged
                # as running, so is_task_running()/stop_crawl() saw a phantom
                # task. Roll back the optimistic registration.
                self.running_tasks.pop(task_id, None)
                logger.error(f"Failed to start crawl task {task_id}: {response.text}")
                return False

        except Exception as e:
            logger.error(f"Error starting crawl task {task_id}: {e}")
            self.running_tasks.pop(task_id, None)
            return False

    async def stop_crawl(self, task_id: str) -> bool:
        """Stop a running crawl task.

        Returns True when the task is not running or the engine confirmed
        the stop; False on engine rejection or transport error.
        """
        try:
            if task_id not in self.running_tasks:
                return True  # task is not running; nothing to do

            async with httpx.AsyncClient(timeout=30) as client:
                response = await client.post(
                    f"{self.api_url}/crawl/stop",
                    json={"task_id": task_id},
                    headers=self._auth_headers()
                )

                if response.status_code == 200:
                    self.running_tasks.pop(task_id, None)
                    logger.info(f"Crawl task stopped successfully: {task_id}")
                    return True
                else:
                    logger.error(f"Failed to stop crawl task {task_id}: {response.text}")
                    return False

        except Exception as e:
            logger.error(f"Error stopping crawl task {task_id}: {e}")
            return False

    async def get_task_status(self, task_id: str) -> Optional[Dict[str, Any]]:
        """Fetch the engine-side status of a task.

        Returns the engine's JSON status payload, or None on failure.
        """
        try:
            async with httpx.AsyncClient(timeout=30) as client:
                response = await client.get(
                    f"{self.api_url}/crawl/status/{task_id}",
                    headers=self._auth_headers()
                )

                if response.status_code == 200:
                    return response.json()

                logger.error(f"Failed to get task status {task_id}: {response.text}")
                return None

        except Exception as e:
            logger.error(f"Error getting task status {task_id}: {e}")
            return None

    async def get_task_results(self, task_id: str, page: int = 1, limit: int = 100) -> Optional[Dict[str, Any]]:
        """Fetch one page of crawl results for a task.

        Args:
            task_id: Task identifier.
            page: 1-based page number.
            limit: Maximum results per page.

        Returns:
            The engine's paginated JSON payload, or None on failure.
        """
        try:
            # Results pages can be large; use a longer timeout than status calls.
            async with httpx.AsyncClient(timeout=60) as client:
                response = await client.get(
                    f"{self.api_url}/crawl/results/{task_id}",
                    params={"page": page, "limit": limit},
                    headers=self._auth_headers()
                )

                if response.status_code == 200:
                    return response.json()

                logger.error(f"Failed to get task results {task_id}: {response.text}")
                return None

        except Exception as e:
            logger.error(f"Error getting task results {task_id}: {e}")
            return None

    async def get_task_logs(self, task_id: str, level: str = "all", limit: int = 100) -> Optional[List[Dict[str, Any]]]:
        """Fetch recent engine-side log entries for a task.

        Args:
            task_id: Task identifier.
            level: Log level filter; "all" returns every level.
            limit: Maximum number of entries to return.

        Returns:
            A list of log-entry dicts (possibly empty), or None on failure.
        """
        try:
            async with httpx.AsyncClient(timeout=30) as client:
                response = await client.get(
                    f"{self.api_url}/crawl/logs/{task_id}",
                    params={"level": level, "limit": limit},
                    headers=self._auth_headers()
                )

                if response.status_code == 200:
                    return response.json().get("logs", [])

                logger.error(f"Failed to get task logs {task_id}: {response.text}")
                return None

        except Exception as e:
            logger.error(f"Error getting task logs {task_id}: {e}")
            return None

    def _build_crawl_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize a raw task config into the Crawl4AI request payload.

        Every field has a default so callers may pass a sparse dict.
        """
        return {
            "urls": config.get("urls", []),
            "strategy": config.get("strategy", "basic"),
            "max_depth": config.get("max_depth", 3),
            "delay_range": [config.get("delay_min", 1.0), config.get("delay_max", 3.0)],
            "concurrent_requests": config.get("concurrent_requests", 5),
            "timeout": config.get("timeout", 30),
            "retry_attempts": config.get("retry_attempts", 3),
            "extraction_rules": config.get("extraction_rules", {}),
            "headers": config.get("headers", {}),
            "cookies": config.get("cookies", {}),
            "proxy_config": config.get("proxy_config"),
        }

    def is_task_running(self, task_id: str) -> bool:
        """Return True if the task is currently flagged as running."""
        return task_id in self.running_tasks

# Mock Crawl4AI service (for development and testing)
class MockCrawlService(CrawlService):
    """Mock crawl service - for development and testing.

    Simulates the Crawl4AI engine entirely in-process: results and logs
    are fabricated and stored in instance dicts instead of being fetched
    over HTTP.
    """

    def __init__(self):
        super().__init__()
        self.mock_results = {}
        self.mock_logs = {}
        # Strong references to in-flight simulation tasks. asyncio's event
        # loop keeps only weak references to tasks, so a fire-and-forget
        # create_task() result can be garbage-collected mid-run; retaining
        # the reference here prevents that.
        self._sim_tasks: Dict[str, asyncio.Task] = {}

    async def start_crawl(self, task_id: str, config: Dict[str, Any]) -> bool:
        """Simulate starting a crawl task; kicks off a background coroutine."""
        try:
            self.running_tasks[task_id] = True

            # Run the simulated crawl in the background, keeping a strong
            # reference (bug fix: the task object was previously discarded
            # and could be garbage-collected before completing).
            task = asyncio.create_task(self._simulate_crawl(task_id, config))
            self._sim_tasks[task_id] = task
            task.add_done_callback(
                lambda t, tid=task_id: self._sim_tasks.pop(tid, None)
            )

            logger.info(f"Mock crawl task started: {task_id}")
            return True

        except Exception as e:
            logger.error(f"Error starting mock crawl task {task_id}: {e}")
            # Bug fix: mirror the parent class and clear the stale running
            # flag when startup fails.
            self.running_tasks.pop(task_id, None)
            return False

    async def _simulate_crawl(self, task_id: str, config: Dict[str, Any]):
        """Simulate the crawl: emit one fabricated result per URL."""
        try:
            urls = config.get("urls", [])
            total_urls = len(urls)

            # Initialize result and log stores for this task.
            self.mock_results[task_id] = []
            self.mock_logs[task_id] = []

            self._add_log(task_id, "info", f"Started crawling {total_urls} URLs")

            for i, url in enumerate(urls):
                if task_id not in self.running_tasks:
                    break  # task was stopped externally

                # Simulate per-page crawl latency.
                await asyncio.sleep(2)

                # Fabricate a plausible crawl result for this URL.
                result = {
                    "url": url,
                    "title": f"Page Title for {url}",
                    "content": f"Mock content for {url}",
                    "extracted_data": {
                        "title": f"Extracted title from {url}",
                        "links": [f"{url}/link1", f"{url}/link2"]
                    },
                    "status_code": 200,
                    "response_time": 1.5,
                    "crawled_at": datetime.now().isoformat()
                }

                self.mock_results[task_id].append(result)
                self._add_log(task_id, "info", f"Successfully crawled: {url}")

            # Only log completion if the task ran to the end (not stopped).
            if task_id in self.running_tasks:
                self._add_log(task_id, "info", f"Crawling completed. Total: {len(self.mock_results[task_id])} pages")
                self.running_tasks.pop(task_id, None)

        except Exception as e:
            self._add_log(task_id, "error", f"Crawling failed: {str(e)}")
            self.running_tasks.pop(task_id, None)

    def _add_log(self, task_id: str, level: str, message: str):
        """Append a timestamped log entry to the task's mock log store."""
        if task_id not in self.mock_logs:
            self.mock_logs[task_id] = []

        log_entry = {
            "level": level,
            "message": message,
            "timestamp": datetime.now().isoformat()
        }

        self.mock_logs[task_id].append(log_entry)

    async def get_task_status(self, task_id: str) -> Optional[Dict[str, Any]]:
        """Return a fabricated status snapshot for the task."""
        if task_id in self.running_tasks:
            # Progress is a rough estimate: 20% per crawled page, capped at 100.
            progress = min(len(self.mock_results.get(task_id, [])) * 20, 100)
            return {
                "status": "running",
                "progress": progress,
                "pages_crawled": len(self.mock_results.get(task_id, [])),
                "current_url": "https://example.com/current"
            }
        else:
            # Not running: "completed" if it ever produced a result store,
            # otherwise it never started ("pending").
            return {
                "status": "completed" if task_id in self.mock_results else "pending",
                "progress": 100 if task_id in self.mock_results else 0,
                "pages_crawled": len(self.mock_results.get(task_id, [])),
                "current_url": None
            }

    async def get_task_results(self, task_id: str, page: int = 1, limit: int = 100) -> Optional[Dict[str, Any]]:
        """Return one page of fabricated results with pagination metadata."""
        results = self.mock_results.get(task_id, [])
        start = (page - 1) * limit
        end = start + limit

        return {
            "results": results[start:end],
            "total": len(results),
            "page": page,
            "limit": limit
        }

    async def get_task_logs(self, task_id: str, level: str = "all", limit: int = 100) -> Optional[List[Dict[str, Any]]]:
        """Return the most recent mock log entries, optionally filtered by level."""
        logs = self.mock_logs.get(task_id, [])

        if level != "all":
            logs = [log for log in logs if log["level"] == level]

        return logs[-limit:] if logs else []
