"""
异步队列调度管理模块
负责区域裁剪和异步处理队列管理
"""

import asyncio
import time
import uuid
from typing import List, Dict, Optional, Callable, Any, Tuple
from dataclasses import dataclass, field
from enum import Enum
from queue import Queue, Empty, Full
import threading
import cv2
import numpy as np
from concurrent.futures import ThreadPoolExecutor, Future

from ..core import config, queue_logger
from ..detection import Detection, DetectionResult
from ..video import FrameData


class TaskStatus(Enum):
    """Lifecycle states of a queued processing task."""
    PENDING = "pending"        # queued, not yet picked up by a worker
    PROCESSING = "processing"  # currently executing on the thread pool
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # exhausted all retry attempts
    CANCELLED = "cancelled"    # cancelled via cancel_task() before completion


class TaskPriority(Enum):
    """Scheduling priority; workers drain queues from URGENT down to LOW."""
    LOW = 1
    NORMAL = 2
    HIGH = 3
    URGENT = 4


@dataclass
class CroppedRegion:
    """A region cropped out of a video frame for downstream processing."""
    region_id: str                    # "{stream_id}_{frame_number}_{index}"
    image: np.ndarray                 # cropped pixels (a copy, not a view)
    bbox: Tuple[int, int, int, int]   # x1, y1, x2, y2 in original-frame coords
    confidence: float                 # detection confidence for this region
    timestamp: float                  # timestamp of the source frame
    stream_id: str                    # id of the originating video stream
    frame_number: int                 # frame index within the stream
    original_shape: Tuple[int, int]   # (height, width) of the source frame


@dataclass
class ProcessingTask:
    """A unit of work tracked by the queue manager through its lifecycle."""
    task_id: str                     # unique id (uuid4 string from submit_task)
    task_type: str                   # dispatch key identifying the handler
    data: Any                        # opaque task payload
    priority: TaskPriority = TaskPriority.NORMAL
    status: TaskStatus = TaskStatus.PENDING
    created_at: float = field(default_factory=time.time)  # submission time
    started_at: Optional[float] = None    # set when a worker starts the task
    completed_at: Optional[float] = None  # set on completion or final failure
    error_message: Optional[str] = None   # most recent failure message
    retry_count: int = 0                  # failed attempts so far
    max_retries: int = 3                  # retries allowed before FAILED
    callback: Optional[Callable] = None   # invoked as callback(task_id, result)


class RegionCropper:
    """Crops detected regions out of video frames and enhances them."""

    def __init__(self):
        # Regions whose (width, height) fall outside these bounds are skipped.
        self.min_region_size = (32, 32)
        self.max_region_size = (1024, 1024)
        # Extra pixels added on every side of a detection bbox before cropping.
        self.padding = 10

        queue_logger.info("区域裁剪器初始化完成")

    def crop_regions(self, frame_data: FrameData,
                    detection_result: DetectionResult) -> List[CroppedRegion]:
        """Crop every detection bbox out of the frame.

        Each bbox is padded by ``self.padding``, clamped to the frame, and
        filtered against the min/max size bounds. Returns a (possibly empty)
        list of CroppedRegion; errors are logged, never raised.
        """
        cropped_regions: List[CroppedRegion] = []

        try:
            image = frame_data.frame
            height, width = image.shape[:2]

            for i, detection in enumerate(detection_result.detections):
                # Fix: cast to int so float bboxes (common detector output)
                # do not crash the numpy slicing below; then pad and clamp
                # to the frame boundaries.
                x1, y1, x2, y2 = (int(v) for v in detection.bbox)
                x1 = max(0, x1 - self.padding)
                y1 = max(0, y1 - self.padding)
                x2 = min(width, x2 + self.padding)
                y2 = min(height, y2 + self.padding)

                region_width = x2 - x1
                region_height = y2 - y1

                # Degenerate boxes (fully outside the frame) also land here,
                # since their clamped width/height is <= 0.
                if (region_width < self.min_region_size[0] or
                    region_height < self.min_region_size[1]):
                    queue_logger.debug(f"跳过过小的区域: {region_width}x{region_height}")
                    continue

                if (region_width > self.max_region_size[0] or
                    region_height > self.max_region_size[1]):
                    queue_logger.debug(f"跳过过大的区域: {region_width}x{region_height}")
                    continue

                # Copy so the crop stays valid if the frame buffer is reused.
                cropped_image = image[y1:y2, x1:x2].copy()

                region = CroppedRegion(
                    region_id=f"{frame_data.stream_id}_{frame_data.frame_number}_{i}",
                    image=cropped_image,
                    bbox=(x1, y1, x2, y2),
                    confidence=detection.confidence,
                    timestamp=frame_data.timestamp,
                    stream_id=frame_data.stream_id,
                    frame_number=frame_data.frame_number,
                    original_shape=(height, width)
                )

                cropped_regions.append(region)

            queue_logger.debug(
                f"从帧 {frame_data.stream_id}:{frame_data.frame_number} "
                f"裁剪了 {len(cropped_regions)} 个区域"
            )

        except Exception as e:
            queue_logger.error(f"裁剪区域失败: {e}")

        return cropped_regions

    def enhance_region(self, image: np.ndarray) -> np.ndarray:
        """Enhance a cropped region: equalize, denoise, then sharpen.

        Grayscale input is processed directly; 3-channel input is converted
        to gray and back to BGR. On any failure the input is returned as-is.
        """
        try:
            if len(image.shape) == 3:
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            else:
                gray = image.copy()

            # Histogram equalization spreads contrast (expects 8-bit input).
            enhanced = cv2.equalizeHist(gray)

            # Light Gaussian blur to damp noise amplified by equalization.
            enhanced = cv2.GaussianBlur(enhanced, (3, 3), 0)

            # 3x3 sharpening kernel; coefficients sum to 1 so overall
            # brightness is preserved.
            kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
            enhanced = cv2.filter2D(enhanced, -1, kernel)

            # Return the result in the caller's channel layout.
            if len(image.shape) == 3:
                enhanced = cv2.cvtColor(enhanced, cv2.COLOR_GRAY2BGR)

            return enhanced

        except Exception as e:
            queue_logger.error(f"图像增强失败: {e}")
            return image


class AsyncQueueManager:
    """Priority task queue executed on a thread pool.

    Worker threads drain four per-priority FIFOs in strict order
    (URGENT > HIGH > NORMAL > LOW); each task body runs on a shared
    ThreadPoolExecutor with a per-task timeout and bounded retries.
    """

    def __init__(self):
        # Capacity / concurrency / timeout limits from the shared config.
        self.max_queue_size = config.queue.max_queue_size
        self.worker_concurrency = config.queue.worker_concurrency
        self.task_timeout = config.queue.task_timeout
        self.max_retries = config.queue.retry_attempts

        # NOTE(review): task_queue is never read or written in this class
        # (all traffic goes through priority_queues); kept so any external
        # code referencing it keeps working.
        self.task_queue = Queue(maxsize=self.max_queue_size)
        # One unbounded FIFO per priority level; the overall capacity limit
        # is enforced in submit_task().
        self.priority_queues = {
            TaskPriority.URGENT: Queue(),
            TaskPriority.HIGH: Queue(),
            TaskPriority.NORMAL: Queue(),
            TaskPriority.LOW: Queue()
        }

        # task_id -> task record, and task_id -> in-flight Future.
        self.tasks: Dict[str, ProcessingTask] = {}
        self.running_tasks: Dict[str, Future] = {}

        # Pool that actually executes task bodies.
        self.executor = ThreadPoolExecutor(max_workers=self.worker_concurrency)

        # Control flag polled by all loops, plus the thread handles.
        self.is_running = False
        self.worker_threads = []

        # Best-effort counters (updated from multiple threads without a
        # lock); queue_size / processing_tasks are recomputed from the real
        # queues in get_queue_stats().
        self.stats = {
            "total_tasks": 0,
            "completed_tasks": 0,
            "failed_tasks": 0,
            "queue_size": 0,
            "processing_tasks": 0
        }

        queue_logger.info("异步队列管理器初始化完成")

    def start(self):
        """Start the worker threads and the periodic stats reporter."""
        if self.is_running:
            queue_logger.warning("队列管理器已在运行")
            return

        self.is_running = True

        # Daemon workers: they die with the process even if stop() is missed.
        for i in range(self.worker_concurrency):
            worker_thread = threading.Thread(
                target=self._worker_loop,
                name=f"QueueWorker-{i}",
                daemon=True
            )
            worker_thread.start()
            self.worker_threads.append(worker_thread)

        stats_thread = threading.Thread(
            target=self._stats_loop,
            name="QueueStats",
            daemon=True
        )
        stats_thread.start()
        self.worker_threads.append(stats_thread)

        queue_logger.info(f"队列管理器启动，工作线程数: {self.worker_concurrency}")

    def stop(self):
        """Stop the loops (best-effort 5s join each) and shut the pool down."""
        if not self.is_running:
            return

        queue_logger.info("正在停止队列管理器...")
        self.is_running = False

        # Loops poll is_running between sleeps; the stats thread can sleep
        # up to 30s and may outlive its join timeout — it is a daemon, so
        # it is simply abandoned.
        for thread in self.worker_threads:
            if thread.is_alive():
                thread.join(timeout=5)

        self.executor.shutdown(wait=True)

        queue_logger.info("队列管理器已停止")

    def submit_task(self, task_type: str, data: Any,
                   priority: TaskPriority = TaskPriority.NORMAL,
                   callback: Optional[Callable] = None) -> str:
        """Queue a task and return its id.

        Raises:
            RuntimeError: when the combined backlog has reached
                max_queue_size. Fix: the per-priority queues are unbounded,
                so put_nowait() on them can never raise Full — the previous
                except-Full branch was dead code and the capacity limit was
                never enforced. It is now checked explicitly.
        """
        if sum(q.qsize() for q in self.priority_queues.values()) >= self.max_queue_size:
            queue_logger.error(f"队列已满，无法提交任务: {task_type}")
            raise RuntimeError("队列已满")

        task_id = str(uuid.uuid4())

        task = ProcessingTask(
            task_id=task_id,
            task_type=task_type,
            data=data,
            priority=priority,
            callback=callback,
            max_retries=self.max_retries
        )

        self.priority_queues[priority].put_nowait(task)
        self.tasks[task_id] = task
        self.stats["total_tasks"] += 1
        self.stats["queue_size"] += 1

        queue_logger.debug(f"提交任务: {task_id}, 类型: {task_type}, 优先级: {priority.name}")
        return task_id

    def get_task_status(self, task_id: str) -> Optional[TaskStatus]:
        """Return the task's current status, or None for an unknown id."""
        task = self.tasks.get(task_id)
        return task.status if task else None

    def cancel_task(self, task_id: str) -> bool:
        """Cancel a pending or not-yet-started task; return True on success.

        A PENDING task is only marked CANCELLED here; the object stays in
        its queue and is discarded by _process_task() when dequeued.
        """
        if task_id not in self.tasks:
            return False

        task = self.tasks[task_id]

        if task.status == TaskStatus.PENDING:
            task.status = TaskStatus.CANCELLED
            queue_logger.info(f"任务已取消: {task_id}")
            return True
        elif task_id in self.running_tasks:
            # Future.cancel() only succeeds if the pool has not started it.
            future = self.running_tasks[task_id]
            if future.cancel():
                task.status = TaskStatus.CANCELLED
                queue_logger.info(f"运行中的任务已取消: {task_id}")
                return True

        return False

    def get_queue_stats(self) -> Dict[str, Any]:
        """Return a snapshot copy of the stats dict with sizes recomputed."""
        self.stats["queue_size"] = sum(q.qsize() for q in self.priority_queues.values())
        self.stats["processing_tasks"] = len(self.running_tasks)
        return self.stats.copy()

    def _worker_loop(self):
        """Worker thread body: pull and run tasks until is_running clears."""
        thread_name = threading.current_thread().name
        queue_logger.info(f"工作线程启动: {thread_name}")

        while self.is_running:
            try:
                task = self._get_next_task()
                if task is None:
                    # All queues empty — back off briefly before polling again.
                    time.sleep(0.1)
                    continue

                self._process_task(task)

            except Exception as e:
                queue_logger.error(f"工作线程异常 {thread_name}: {e}")
                time.sleep(1)

        queue_logger.info(f"工作线程结束: {thread_name}")

    def _get_next_task(self) -> Optional[ProcessingTask]:
        """Pop the highest-priority queued task, or None if all are empty."""
        for priority in (TaskPriority.URGENT, TaskPriority.HIGH,
                         TaskPriority.NORMAL, TaskPriority.LOW):
            try:
                task = self.priority_queues[priority].get_nowait()
                self.stats["queue_size"] -= 1
                return task
            except Empty:
                continue
        return None

    def _process_task(self, task: ProcessingTask):
        """Run one task on the pool and wait for completion or timeout."""
        # Fix: a task cancelled while still queued must be dropped, not run.
        # cancel_task() only flips the status; the object remains queued.
        if task.status == TaskStatus.CANCELLED:
            queue_logger.debug(f"丢弃已取消的任务: {task.task_id}")
            return

        task.status = TaskStatus.PROCESSING
        task.started_at = time.time()

        try:
            future = self.executor.submit(self._execute_task, task)
            self.running_tasks[task.task_id] = future

            try:
                # NOTE(review): result(timeout=...) does not stop the
                # underlying call — a timed-out task keeps running in the
                # pool while it is retried. Confirm task bodies are
                # idempotent, or add real cancellation.
                result = future.result(timeout=self.task_timeout)
                task.status = TaskStatus.COMPLETED
                task.completed_at = time.time()
                self.stats["completed_tasks"] += 1

                # Callbacks run on this worker thread; their errors are
                # logged, never propagated.
                if task.callback:
                    try:
                        task.callback(task.task_id, result)
                    except Exception as e:
                        queue_logger.error(f"任务回调失败 {task.task_id}: {e}")

                queue_logger.debug(f"任务完成: {task.task_id}")

            except Exception as e:
                self._handle_task_error(task, str(e))

        finally:
            self.running_tasks.pop(task.task_id, None)

    def _execute_task(self, task: ProcessingTask) -> Any:
        """Placeholder task body; real dispatch on task_type belongs here."""
        queue_logger.debug(f"执行任务: {task.task_id}, 类型: {task.task_type}")
        time.sleep(0.1)  # simulate processing time
        return {"task_id": task.task_id, "result": "success"}

    def _handle_task_error(self, task: ProcessingTask, error_message: str):
        """Requeue a failed task until max_retries, then mark it FAILED."""
        task.retry_count += 1
        task.error_message = error_message

        if task.retry_count <= task.max_retries:
            # Requeue at the same priority for another attempt.
            task.status = TaskStatus.PENDING
            task.started_at = None
            self.priority_queues[task.priority].put_nowait(task)
            self.stats["queue_size"] += 1

            queue_logger.warning(
                f"任务失败，重试 {task.retry_count}/{task.max_retries}: "
                f"{task.task_id}, 错误: {error_message}"
            )
        else:
            task.status = TaskStatus.FAILED
            task.completed_at = time.time()
            self.stats["failed_tasks"] += 1

            queue_logger.error(
                f"任务最终失败: {task.task_id}, 错误: {error_message}"
            )

    def _stats_loop(self):
        """Log queue statistics every 30 seconds while running."""
        while self.is_running:
            try:
                stats = self.get_queue_stats()
                queue_logger.info(
                    f"队列统计 - 总任务: {stats['total_tasks']}, "
                    f"队列中: {stats['queue_size']}, "
                    f"处理中: {stats['processing_tasks']}, "
                    f"已完成: {stats['completed_tasks']}, "
                    f"失败: {stats['failed_tasks']}"
                )
                time.sleep(30)  # report once every 30 seconds
            except Exception as e:
                queue_logger.error(f"统计循环异常: {e}")
                time.sleep(5)
