"""
Scrapyd服务集成
提供对Scrapyd爬虫服务器的API访问和管理
"""
import asyncio
import json
from datetime import datetime
from typing import Any, Dict, List, Optional
from urllib.parse import urljoin, urlparse

import httpx

from app.core.config import settings
from app.core.logging import get_logger

logger = get_logger(__name__)


class ScrapydService:
    """Client for the Scrapyd HTTP JSON API.

    Wraps the standard Scrapyd endpoints (daemonstatus, listprojects,
    listspiders, listjobs, schedule, cancel, addversion, delproject,
    delversion, listversions) plus best-effort log retrieval.

    Every public coroutine returns the decoded JSON response on success,
    or a dict of the form ``{"status": "error", "message": ...}`` on
    failure, so callers never have to catch transport exceptions.
    """
    
    def __init__(self):
        # Fall back to defaults so a missing configuration entry does not
        # break service construction.
        self.base_url = getattr(settings, 'SCRAPYD_BASE_URL', 'http://localhost:6800')
        # Derive host/port from the URL; fall back on a malformed value
        # (urlparse's .port raises ValueError for e.g. an out-of-range port).
        try:
            parsed = urlparse(self.base_url)
            self.host = parsed.hostname or 'localhost'
            self.port = parsed.port or 6800
        except ValueError:
            self.host = 'localhost'
            self.port = 6800
        
        self.project_name = getattr(settings, 'SCRAPYD_PROJECT_NAME', 'default') or "default"
        # Request timeout in seconds for every call to Scrapyd.
        self.timeout = 30
        
    async def _make_request(self, endpoint: str, data: Optional[Dict] = None, method: str = "GET") -> Dict[str, Any]:
        """Issue an HTTP request to the Scrapyd server.

        Args:
            endpoint: Endpoint path relative to ``base_url`` (e.g. ``"listjobs.json"``).
            data: Query parameters for GET requests, form fields for POST.
            method: ``"GET"`` for a GET request; anything else POSTs.

        Returns:
            The parsed JSON body, or an error dict on any failure.
        """
        # Force a trailing slash on the base so urljoin() appends the
        # endpoint instead of replacing the base URL's last path segment.
        url = urljoin(self.base_url.rstrip("/") + "/", endpoint)
        
        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                if method.upper() == "GET":
                    response = await client.get(url, params=data)
                else:
                    response = await client.post(url, data=data)
                
                response.raise_for_status()
                return response.json()
                
        except httpx.HTTPStatusError as e:
            logger.error(f"Scrapyd HTTP error for {endpoint}: {e.response.text}")
            return {"status": "error", "message": f"HTTP error: {e.response.status_code}"}
            
        except httpx.RequestError as e:
            logger.error(f"Scrapyd connection error for {endpoint}: {e}")
            return {"status": "error", "message": f"Connection error: {str(e)}"}
            
        except Exception as e:
            logger.error(f"Scrapyd unexpected error for {endpoint}: {e}")
            return {"status": "error", "message": f"Unexpected error: {str(e)}"}
    
    async def get_daemon_status(self) -> Dict[str, Any]:
        """Return the Scrapyd daemon status (daemonstatus.json)."""
        return await self._make_request("daemonstatus.json")
    
    async def get_projects(self) -> Dict[str, Any]:
        """Return the list of deployed projects (listprojects.json)."""
        return await self._make_request("listprojects.json")
    
    async def get_spiders(self, project: Optional[str] = None) -> Dict[str, Any]:
        """Return the spiders of *project* (defaults to the configured project)."""
        project_name = project or self.project_name
        # Pass the project via params so the value is URL-encoded.
        return await self._make_request("listspiders.json", data={"project": project_name})
    
    async def get_jobs(self, project: Optional[str] = None) -> Dict[str, Any]:
        """Return pending/running/finished jobs of *project* (listjobs.json)."""
        project_name = project or self.project_name
        return await self._make_request("listjobs.json", data={"project": project_name})
    
    async def schedule_spider(
        self, 
        spider_name: str, 
        project: Optional[str] = None,
        settings: Optional[Dict[str, Any]] = None,
        spider_args: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Schedule a spider run via schedule.json.

        Args:
            spider_name: Name of the spider to run.
            project: Target project; defaults to the configured project.
            settings: Scrapy setting overrides (e.g. ``{"DOWNLOAD_DELAY": 2}``).
            spider_args: Extra arguments passed through to the spider.

        Returns:
            Scrapyd's response, containing ``jobid`` on success.
        """
        project_name = project or self.project_name
        
        data: Dict[str, Any] = {"project": project_name, "spider": spider_name}
        
        # Spider arguments are plain form fields on schedule.json.
        if spider_args:
            for key, value in spider_args.items():
                data[key] = str(value)
        
        # Scrapyd expects one repeated "setting" field per override, each
        # formatted as KEY=value — not a single JSON blob.
        if settings:
            data["setting"] = [f"{key}={value}" for key, value in settings.items()]
        
        return await self._make_request("schedule.json", data=data, method="POST")
    
    async def cancel_job(self, job_id: str, project: Optional[str] = None) -> Dict[str, Any]:
        """Cancel a pending or running job (cancel.json)."""
        project_name = project or self.project_name
        
        data = {"project": project_name, "job": job_id}
        return await self._make_request("cancel.json", data=data, method="POST")
    
    async def get_job_log(self, job_id: str, project: Optional[str] = None) -> Dict[str, Any]:
        """Fetch the log file of a job, best effort.

        Scrapyd serves logs under ``logs/<project>/<spider>/<job_id>.log``,
        but only the job id is known here, so the spider name is first
        looked up from the job listing.

        Returns:
            ``{"status": "success", "log": ..., "job_id": ...}`` when the
            log was fetched, otherwise a ``{"status": "partial", ...}``
            placeholder.
        """
        project_name = project or self.project_name
        
        try:
            # Look up the spider name for this job id in the job listing.
            jobs_response = await self.get_jobs(project_name)
            if jobs_response.get("status") == "ok":
                # Search pending, running and finished buckets for the job.
                spider_name = None
                for status in ["pending", "running", "finished"]:
                    for job in jobs_response.get(status, []):
                        if job.get("id") == job_id and "spider" in job:
                            spider_name = job["spider"]
                            break
                    if spider_name:
                        break
                
                if spider_name:
                    log_url = f"logs/{project_name}/{spider_name}/{job_id}.log"
                    async with httpx.AsyncClient(timeout=self.timeout) as client:
                        response = await client.get(urljoin(self.base_url.rstrip("/") + "/", log_url))
                        if response.status_code == 200:
                            return {
                                "status": "success", 
                                "log": response.text,
                                "job_id": job_id
                            }
        except Exception as e:
            # Best effort: a missing log must not raise to the caller.
            logger.warning(f"Failed to get full log for job {job_id}: {e}")
        
        # Fall back to a placeholder when the log file is unavailable.
        return {
            "status": "partial",
            "message": "Log file not available, checking job status",
            "job_id": job_id
        }
    
    async def deploy_project(
        self, 
        egg_file_path: str, 
        project: Optional[str] = None,
        version: Optional[str] = None
    ) -> Dict[str, Any]:
        """Upload a packaged egg to Scrapyd (addversion.json).

        Args:
            egg_file_path: Path to the ``.egg`` file to deploy.
            project: Target project; defaults to the configured project.
            version: Version label; defaults to a sortable timestamp.
        """
        project_name = project or self.project_name
        
        if not version:
            # Timestamped versions sort chronologically.
            version = datetime.now().strftime("%Y%m%d_%H%M%S")
        
        data = {"project": project_name, "version": version}
        
        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                with open(egg_file_path, "rb") as egg_file:
                    files = {"egg": egg_file}
                    response = await client.post(
                        urljoin(self.base_url.rstrip("/") + "/", "addversion.json"),
                        data=data,
                        files=files
                    )
                
                response.raise_for_status()
                return response.json()
                
        except Exception as e:
            logger.error(f"Failed to deploy project: {e}")
            return {"status": "error", "message": f"Deployment failed: {str(e)}"}
    
    async def delete_project(self, project: str) -> Dict[str, Any]:
        """Delete a project and all of its versions (delproject.json)."""
        data = {"project": project}
        return await self._make_request("delproject.json", data=data, method="POST")
    
    async def delete_version(self, project: str, version: str) -> Dict[str, Any]:
        """Delete a single version of a project (delversion.json)."""
        data = {"project": project, "version": version}
        return await self._make_request("delversion.json", data=data, method="POST")
    
    async def get_project_versions(self, project: str) -> Dict[str, Any]:
        """Return the version list of *project* (listversions.json)."""
        return await self._make_request("listversions.json", data={"project": project})


class ScrapydWebIntegration:
    """Higher-level helpers that aggregate ScrapydService calls for a UI."""
    
    def __init__(self):
        self.scrapyd_service = ScrapydService()
    
    async def get_dashboard_data(self) -> Dict[str, Any]:
        """Collect daemon status, projects and jobs for a dashboard view.

        Returns:
            A dict with ``daemon_status``, ``projects``, ``jobs`` and an ISO
            ``timestamp``, or ``{"error": ...}`` on failure.
        """
        try:
            # Fetch all three status endpoints concurrently.
            daemon_status, projects, jobs = await asyncio.gather(
                self.scrapyd_service.get_daemon_status(),
                self.scrapyd_service.get_projects(),
                self.scrapyd_service.get_jobs()
            )
            
            return {
                "daemon_status": daemon_status,
                "projects": projects,
                "jobs": jobs,
                "timestamp": datetime.now().isoformat()
            }
            
        except Exception as e:
            logger.error(f"Failed to get dashboard data: {e}")
            return {"error": str(e)}
    
    async def get_spider_stats(self, project: str, spider_name: str) -> Dict[str, Any]:
        """Summarize all jobs belonging to one spider within a project."""
        try:
            jobs_data = await self.scrapyd_service.get_jobs(project)
            
            if jobs_data.get("status") == "ok":
                spider_jobs = []
                for job_type in ["pending", "running", "finished"]:
                    for job in jobs_data.get(job_type, []):
                        if job.get("spider") == spider_name:
                            # Tag each job with the bucket it came from.
                            job["type"] = job_type
                            spider_jobs.append(job)
                
                return {
                    "spider_name": spider_name,
                    "total_jobs": len(spider_jobs),
                    "jobs": spider_jobs,
                    "project": project
                }
            
            return {"error": "Failed to get job data"}
            
        except Exception as e:
            logger.error(f"Failed to get spider stats: {e}")
            return {"error": str(e)}
    
    async def schedule_spider_with_webhook(
        self,
        spider_name: str,
        project: Optional[str] = None,
        settings: Optional[Dict[str, Any]] = None,
        spider_args: Optional[Dict[str, Any]] = None,
        webhook_url: Optional[str] = None
    ) -> Dict[str, Any]:
        """Schedule a spider, optionally injecting a webhook callback URL.

        Args:
            spider_name: Spider to schedule.
            project: Target project; defaults to the service's project.
            settings: Scrapy setting overrides, forwarded unchanged.
            spider_args: Extra spider arguments, forwarded unchanged.
            webhook_url: When given, passed to the spider as ``WEBHOOK_URL``.
        """
        try:
            if webhook_url:
                # Copy rather than mutating the caller's dict in place.
                spider_args = dict(spider_args or {})
                spider_args["WEBHOOK_URL"] = webhook_url
            
            result = await self.scrapyd_service.schedule_spider(
                spider_name, project, settings, spider_args
            )
            
            # Record the job id for traceability.
            if result.get("status") == "ok":
                logger.info(f"Scheduled spider {spider_name} with job ID: {result.get('jobid')}")
            
            return result
            
        except Exception as e:
            logger.error(f"Failed to schedule spider with webhook: {e}")
            return {"status": "error", "message": str(e)}


# Module-level singleton instances shared across the application.
# NOTE(review): ScrapydWebIntegration constructs its own ScrapydService,
# so these two objects do not share a service instance.
scrapyd_service = ScrapydService()
scrapyd_web_integration = ScrapydWebIntegration()