"""
任务管理类 - 管理RPA任务的创建、执行、监控和超时控制
"""
import asyncio
import uuid
from typing import Dict, Any, Optional, List
from datetime import datetime, timedelta
from core.utils.logger_manager import LoggerManager
from database.connection import db_manager
from database.repositories import TaskRepository
from services.rpa_service import get_rpa_service


class TaskManager:
    """Task manager for RPA jobs: creation, execution, monitoring and
    timeout-based cleanup.

    Task state lives in two places: an in-memory registry
    (``self.active_tasks``) for fast access to running tasks, and the
    database (via ``TaskRepository``) for durable history.
    """

    def __init__(self):
        self.logger = LoggerManager.get_logger("task_manager")
        self.rpa_service = get_rpa_service()
        # In-memory registry of live tasks, keyed by task_id.
        self.active_tasks: Dict[str, Dict[str, Any]] = {}
        # Strong references to fire-and-forget runner tasks so the event
        # loop cannot garbage-collect them mid-flight (asyncio.create_task
        # documentation explicitly warns about this).
        self._background_tasks: set = set()
        self.task_timeout = 3600  # per-task timeout, in seconds (1 hour)

    async def create_task(self, facebook_params: Optional[Dict[str, Any]] = None,
                          linkedin_params: Optional[Dict[str, Any]] = None,
                          proxy_config: Optional[Dict[str, str]] = None) -> str:
        """Create a new task (DB row + in-memory entry) and return its id.

        Args:
            facebook_params: parameters for the Facebook sub-task, if any.
            linkedin_params: parameters for the LinkedIn sub-task, if any.
            proxy_config: proxy settings; accepted for interface
                compatibility but not used by this method.

        Returns:
            The generated task id (a UUID4 string).

        Raises:
            Exception: re-raised (after logging) if persisting fails.
        """
        try:
            task_id = str(uuid.uuid4())

            task_data = {
                "task_id": task_id,
                "topic": {'facebook': facebook_params, 'linkedin': linkedin_params},
                "status": "running",
                "steps": {},
                "results": None,
            }

            # get_session() is an async generator; take the first yielded
            # session and stop.
            async for db_session in db_manager.get_session():
                task_repo = TaskRepository(db_session)
                await task_repo.create(task_data)
                break

            # FIX: "timeout_at" previously held the raw timeout (int
            # seconds), which made the datetime comparison in
            # cleanup_expired_tasks() raise TypeError. Store the absolute
            # deadline instead.
            now = datetime.utcnow()
            self.active_tasks[task_id] = {
                "task_id": task_id,
                "status": "running",
                "created_at": now,
                "timeout_at": now + timedelta(seconds=self.task_timeout),
                "facebook_params": facebook_params,
                "linkedin_params": linkedin_params,
            }

            self.logger.info(f"任务创建成功: {task_id}")
            return task_id

        except Exception as e:
            self.logger.error(f"创建任务失败: {str(e)}")
            raise

    async def start_task(self, task_id: str) -> bool:
        """Schedule asynchronous execution of a previously created task.

        Returns True if the runner coroutine was scheduled, False if the
        task id is unknown or scheduling failed.
        """
        try:
            if task_id not in self.active_tasks:
                self.logger.error(f"任务不存在: {task_id}")
                return False

            # Schedule the runner without awaiting completion. FIX: keep a
            # strong reference to the Task — a bare create_task() result
            # may be garbage-collected before it finishes.
            runner = asyncio.create_task(self._execute_task_with_timeout(task_id))
            self._background_tasks.add(runner)
            runner.add_done_callback(self._background_tasks.discard)

            self.logger.info(f"任务启动成功: {task_id}")
            return True

        except Exception as e:
            self.logger.error(f"启动任务失败: {task_id}, 错误: {str(e)}")
            await self.update_task_status(task_id, "failed", {"error": str(e)})
            return False

    async def _execute_task_with_timeout(self, task_id: str):
        """Run the task, aggregate sub-task results and persist the outcome.

        NOTE(review): despite the name, no explicit timeout is enforced
        here; expired tasks are reaped separately by cleanup_expired_tasks().
        """
        try:
            results = await self._execute_task(task_id)

            # Normalise the return shape: _execute_task may yield None
            # (unknown task id), a single dict (error case) or a list of
            # per-sub-task results. The old code crashed on None and
            # iterated the *keys* of an error dict.
            if results is None:
                results = []
            elif not isinstance(results, list):
                results = [results]

            crawl_data_final = []
            # An empty result set counts as failure.
            all_success = bool(results)

            for result in results:
                # FIX: exceptions (e.g. from asyncio.gather) and dicts
                # carrying an 'error' key or status == 'failed' all mark the
                # run as failed. The old check evaluated
                # `'results' in <Exception>` which raises TypeError, and
                # treated {"error": ...} dicts as success.
                if isinstance(result, Exception) or (
                    isinstance(result, dict)
                    and (result.get('status') == 'failed' or 'error' in result)
                ):
                    all_success = False

                # Collect JSON-serialisable payloads from dict results only.
                if isinstance(result, dict) and 'results' in result:
                    crawl_data_final.append(self._serialize_results(result['results']))

            status = "success" if all_success else "failed"
            await self.update_task_status(task_id, status, crawl_data_final)

            await self._cleanup_task(task_id)

        except Exception as e:
            self.logger.error(f"任务执行异常: {task_id}, 错误: {str(e)}")
            await self.update_task_status(task_id, "failed", {"error": str(e)})
            await self._cleanup_task(task_id)

    async def _execute_task(self, task_id: str) -> Optional[Any]:
        """Run the concurrent RPA sub-tasks for *task_id*.

        Returns whatever ``rpa_service.run_concurrent_tasks()`` produces,
        None for an unknown task id, or an ``{"error": ...}`` dict when the
        call raises.
        """
        try:
            task_info = self.active_tasks.get(task_id)
            if not task_info:
                return None
            # Run the Facebook/LinkedIn sub-tasks concurrently.
            return await self.rpa_service.run_concurrent_tasks(
                task_id=task_id,
                facebook_params=task_info.get('facebook_params'),
                linkedin_params=task_info.get('linkedin_params')
            )

        except Exception as e:
            self.logger.error(f"执行任务失败: {task_id}, 错误: {str(e)}")
            return {"error": str(e)}

    async def _wait_for_timeout(self, task_id: str, timeout_at: datetime):
        """Poll once per second until *timeout_at* passes or the task ends.

        NOTE(review): not called anywhere in this module; kept for
        backward compatibility.
        """
        try:
            while datetime.utcnow() < timeout_at:
                await asyncio.sleep(1)

                # Task removed (cancelled/cleaned up) — stop waiting.
                if task_id not in self.active_tasks:
                    return

                task_info = self.active_tasks[task_id]
                if task_info['status'] in ['completed', 'failed', 'cancelled']:
                    return

            # Deadline reached without the task finishing.
            self.logger.warning(f"任务超时: {task_id}")

        except asyncio.CancelledError:
            pass

    async def cancel_task(self, task_id: str) -> bool:
        """Cancel a running task; returns True on success.

        Refuses to cancel unknown tasks or tasks already in a terminal
        state ('completed', 'failed', 'cancelled').
        """
        try:
            if task_id not in self.active_tasks:
                self.logger.error(f"任务不存在: {task_id}")
                return False

            task_info = self.active_tasks[task_id]
            if task_info['status'] in ['completed', 'failed', 'cancelled']:
                self.logger.warning(f"任务已完成，无法取消: {task_id}")
                return False

            # Persist the cancelled state before releasing resources.
            await self.update_task_status(task_id, "cancelled", {"error": "任务被取消"})

            await self._cleanup_task(task_id)

            self.logger.info(f"任务已取消: {task_id}")
            return True

        except Exception as e:
            self.logger.error(f"取消任务失败: {task_id}, 错误: {str(e)}")
            return False

    async def update_task_status(self, task_id: str, status: str,
                                 results: Optional[Any] = None):
        """Update task status in memory and in the database.

        Args:
            task_id: id of the task to update.
            status: new status string (e.g. 'success', 'failed', 'cancelled').
            results: optional result payload; oversized payloads are
                truncated before persisting. FIX: an explicit empty
                list/dict is now stored instead of being dropped by a
                truthiness check.
        """
        try:
            # Mirror the change into the in-memory registry first.
            if task_id in self.active_tasks:
                self.active_tasks[task_id]['status'] = status
                self.active_tasks[task_id]['updated_at'] = datetime.utcnow()
                if results is not None:
                    self.active_tasks[task_id]['results'] = results

            # Guard against payloads exceeding the DB packet size.
            truncated_results = self._truncate_results(results) if results is not None else None

            async for db_session in db_manager.get_session():
                task_repo = TaskRepository(db_session)
                await task_repo.update_status(task_id, status, truncated_results)
                break

            self.logger.info(f"任务状态已更新: {task_id}, 状态: {status}")

        except Exception as e:
            self.logger.error(f"更新任务状态失败: {task_id}, 错误: {str(e)}")

    async def get_task_info(self, task_id: str) -> Optional[Dict[str, Any]]:
        """Return task info, preferring the in-memory entry over the DB.

        Returns None if the task is unknown or the lookup fails.
        """
        try:
            # Fast path: task still active in memory.
            if task_id in self.active_tasks:
                return self.active_tasks[task_id]

            # Fall back to the database for finished/older tasks.
            async for db_session in db_manager.get_session():
                task_repo = TaskRepository(db_session)
                task = await task_repo.get_by_task_id(task_id)
                if task:
                    return {
                        "task_id": task.task_id,
                        "topic": task.topic,
                        "status": task.status,
                        "steps": task.steps,
                        "results": task.results,
                        "created_at": task.created_at,
                        "updated_at": task.updated_at
                    }
                break

            return None

        except Exception as e:
            self.logger.error(f"获取任务信息失败: {task_id}, 错误: {str(e)}")
            return None

    async def get_active_tasks(self) -> List[Dict[str, Any]]:
        """Return a snapshot list of all in-memory active tasks."""
        return list(self.active_tasks.values())

    async def get_recent_tasks(self, limit: int = 50) -> List[Dict[str, Any]]:
        """Return up to *limit* most recent tasks from the database.

        Returns an empty list on error. FIX: also returns [] (not None)
        when the session generator yields no session.
        """
        try:
            async for db_session in db_manager.get_session():
                task_repo = TaskRepository(db_session)
                tasks = await task_repo.get_recent_tasks(limit)
                return [
                    {
                        "task_id": task.task_id,
                        "topic": task.topic,
                        "status": task.status,
                        "steps": task.steps,
                        "results": task.results,
                        "created_at": task.created_at,
                        "updated_at": task.updated_at
                    }
                    for task in tasks
                ]
            return []
        except Exception as e:
            self.logger.error(f"获取最近任务失败: {str(e)}")
            return []

    async def _cleanup_task(self, task_id: str):
        """Release RPA resources and drop the task from the registry."""
        try:
            # Close any browsers the RPA service opened for this run.
            await self.rpa_service.cleanup_browsers()

            self.active_tasks.pop(task_id, None)

            self.logger.info(f"任务资源清理完成: {task_id}")

        except Exception as e:
            self.logger.error(f"清理任务资源失败: {task_id}, 错误: {str(e)}")

    def _serialize_results(self, results):
        """Convert result objects into JSON-serialisable structures.

        Pydantic-style models (with a ``.dict()`` method) and plain objects
        (via ``__dict__``) are converted; builtins pass through untouched.
        Falls back to ``str(results)`` if conversion raises.
        """
        try:
            if isinstance(results, list):
                serialized_list = []
                for item in results:
                    if hasattr(item, 'dict'):  # Pydantic-style model
                        serialized_list.append(item.dict())
                    elif hasattr(item, '__dict__'):  # plain object
                        serialized_list.append(item.__dict__)
                    else:
                        serialized_list.append(item)
                return serialized_list
            elif hasattr(results, 'dict'):  # single Pydantic-style model
                return results.dict()
            elif hasattr(results, '__dict__'):  # single plain object
                return results.__dict__
            else:
                return results
        except Exception as e:
            self.logger.warning(f"序列化结果数据失败: {str(e)}")
            return str(results)  # last resort: stringify

    def _truncate_results(self, results):
        """Truncate oversized result payloads to avoid DB write errors.

        The JSON-encoded payload is capped at 1 MB (MySQL's default
        ``max_allowed_packet``). Lists and dicts are trimmed element by
        element with an explicit truncation marker; other types are
        stringified and cut.
        """
        try:
            import json

            # Measure the JSON-encoded size first.
            json_str = json.dumps(results, ensure_ascii=False)
            max_size = 1 * 1024 * 1024  # 1 MB cap (MySQL max_allowed_packet)

            if len(json_str) <= max_size:
                return results

            self.logger.warning(f"结果数据过大({len(json_str)}字节)，正在截断...")

            # Lists: keep leading items until the budget is exhausted.
            if isinstance(results, list):
                truncated_list = []
                current_size = 0
                for item in results:
                    item_str = json.dumps(item, ensure_ascii=False)
                    if current_size + len(item_str) > max_size:
                        break
                    truncated_list.append(item)
                    current_size += len(item_str)

                # Append an explicit truncation marker for consumers.
                if len(truncated_list) < len(results):
                    truncated_list.append({
                        "_truncated": True,
                        "_original_count": len(results),
                        "_truncated_count": len(truncated_list),
                        "_message": f"数据过大，已截断。原始:{len(results)}项，保留:{len(truncated_list)}项"
                    })
                return truncated_list

            # Dicts: keep leading fields, then flag the cut-off point.
            elif isinstance(results, dict):
                truncated_dict = {}
                current_size = 0

                for key, value in results.items():
                    item_str = json.dumps({key: value}, ensure_ascii=False)
                    if current_size + len(item_str) > max_size:
                        truncated_dict["_truncated"] = True
                        truncated_dict["_message"] = f"数据过大，字段'{key}'及后续字段已截断"
                        break
                    truncated_dict[key] = value
                    current_size += len(item_str)

                return truncated_dict

            # Anything else: stringify and hard-cut.
            else:
                result_str = str(results)
                if len(result_str) > max_size:
                    truncated_str = result_str[:max_size-100] + "...[数据过大已截断]"
                    return {"_truncated_string": truncated_str}
                return results

        except Exception as e:
            self.logger.error(f"截断结果数据失败: {str(e)}")
            # Return a minimal diagnostic payload rather than raising.
            return {
                "_error": f"数据处理失败: {str(e)}",
                "_original_type": str(type(results))
            }

    async def cleanup_expired_tasks(self):
        """Reap tasks whose 'timeout_at' deadline has passed.

        Relies on create_task() storing 'timeout_at' as an absolute
        datetime (see FIX there); each expired task is marked 'timeout'
        and its resources released.
        """
        try:
            current_time = datetime.utcnow()
            # Collect ids first: _cleanup_task mutates self.active_tasks.
            expired_tasks = [
                task_id
                for task_id, task_info in self.active_tasks.items()
                if current_time > task_info['timeout_at']
            ]

            for task_id in expired_tasks:
                self.logger.warning(f"清理过期任务: {task_id}")
                await self.update_task_status(task_id, "timeout", {"error": "任务超时"})
                await self._cleanup_task(task_id)

            if expired_tasks:
                self.logger.info(f"清理了 {len(expired_tasks)} 个过期任务")

        except Exception as e:
            self.logger.error(f"清理过期任务失败: {str(e)}")


# Global singleton TaskManager instance shared across the application
task_manager = TaskManager()


def get_task_manager() -> TaskManager:
    """Return the module-level singleton TaskManager instance."""
    manager = task_manager
    return manager
