from ultralytics import YOLO
import numpy as np
from typing import Optional, Tuple, Dict
import queue
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

from utils import *

logger = setup_logger()

SEG_MODEL_PATH = "./merge_yolo11n_seg_rknn_model"
POSE_MODEL_PATH = "./merge_yolo11n_pose_rknn_model"

class ImageData:
    """Container for one captured frame: timestamps, color/depth images, and
    an optional on-disk path for the color image."""
    def __init__(self):
        # Capture time as a UNIX timestamp in seconds — presumably set by the
        # producer; TODO confirm against the capture code.
        self.timestamp: float = 0.0
        # Human-readable rendering of `timestamp` (format set by the producer).
        self.readable_timestamp: str = ""
        # Color frame as a numpy array; None until populated.
        self.color_img: Optional[np.ndarray] = None
        # Depth frame as a numpy array; None until populated.
        self.depth_img: Optional[np.ndarray] = None
        # Filesystem path where the color image was saved, if any.
        self.color_path: Optional[str] = None

class SegPose:
    """Concurrent YOLO segmentation + pose pipeline backed by one thread pool.

    Frames are submitted via put(); each frame spawns one segmentation job and
    one pose job. When BOTH jobs for a frame have finished (in either order),
    the combined result dict {"seg_result", "pose_result", "image_data"} is
    placed on result_queue and can be retrieved with get_result().
    """

    def __init__(self, seg_workers=2, pose_workers=2, max_queue_size=30):
        # Log the assumed NPU concurrency (placeholder for a real probe).
        self._check_hardware_concurrency()

        # Cooperative shutdown flag, polled by the dispatcher loops.
        self.shutdown_flag = False

        # Shared model instances (one per task type) to limit resource usage.
        self.seg_model = YOLO(SEG_MODEL_PATH, task="segment")
        self.pose_model = YOLO(POSE_MODEL_PATH, task="pose")

        # Pool sized for the inference workers plus the dispatcher threads.
        self.executor = ThreadPoolExecutor(
            max_workers=seg_workers + pose_workers + 2  # +2 for dispatchers
        )

        # Bounded queues so a slow consumer applies back-pressure.
        self.task_queue = queue.Queue(maxsize=max_queue_size)
        self.result_queue = queue.Queue(maxsize=max_queue_size)

        # Task bookkeeping — every structure below is guarded by self.lock.
        self.task_counter = 0
        self.lock = threading.Lock()
        self.pending_tasks: Dict[int, Tuple[threading.Event, ImageData]] = {}
        # Partial model outputs per task: task_id -> {"seg": ..., "pose": ...}.
        # BUGFIX: results used to be read from instance attributes
        # (_seg_<id>/_pose_<id>) that were never written, so every emitted
        # result carried None; outputs are now stored here explicitly.
        self._partial_results: Dict[int, dict] = {}

        # Two dispatcher threads pull from task_queue and fan out jobs.
        for _ in range(2):
            self.executor.submit(self._task_dispatcher)

        # NOTE: the old `executor.submit(self._result_collector)` was a no-op
        # (calling a generator function only creates a generator object), so
        # the submit has been removed; behavior is unchanged.

        # Throughput monitoring.
        self.processed_count = 0
        self.last_log_time = time.time()

    def _check_hardware_concurrency(self):
        """Log the assumed hardware concurrency (no real NPU probe yet)."""
        logger.info("Assuming NPU max concurrency: 4 tasks (2 seg + 2 pose)")

    def put(self, image_data: "ImageData") -> int:
        """Enqueue a frame for processing.

        Returns the assigned task id, or -1 if the task queue is full
        (registration is rolled back so ids do not leak).
        """
        with self.lock:
            task_id = self.task_counter
            self.task_counter += 1
            self.pending_tasks[task_id] = (threading.Event(), image_data)

        try:
            self.task_queue.put((task_id, image_data), timeout=0.1)
            return task_id
        except queue.Full:
            with self.lock:
                del self.pending_tasks[task_id]
            return -1

    def _task_dispatcher(self):
        """Dispatcher loop (runs in several threads): fan out seg+pose jobs."""
        while not self.shutdown_flag:
            try:
                task_id, image_data = self.task_queue.get(timeout=0.5)
            except queue.Empty:
                continue
            try:
                seg_future = self.executor.submit(
                    self._process_segmentation, task_id, image_data.color_img
                )
                pose_future = self.executor.submit(
                    self._process_pose, task_id, image_data.color_img
                )
                # BUGFIX: hand each future to the callback so its result is
                # actually captured. The old callbacks ignored the future and
                # assumed seg always finished before pose — if pose finished
                # first the task was silently lost; _task_completed now
                # handles either completion order.
                seg_future.add_done_callback(
                    lambda f, tid=task_id: self._task_completed(tid, "seg", f)
                )
                pose_future.add_done_callback(
                    lambda f, tid=task_id: self._task_completed(tid, "pose", f)
                )
            except Exception as e:
                logger.error(f"Task dispatch error: {e}")

    def _process_segmentation(self, task_id: int, color_img: np.ndarray):
        """Run the segmentation model; returns (task_id, result-or-None)."""
        try:
            return task_id, self.seg_model(color_img)[0]
        except Exception as e:
            logger.error(f"Segmentation error: {e}")
            return task_id, None

    def _process_pose(self, task_id: int, color_img: np.ndarray):
        """Run the pose model; returns (task_id, result-or-None)."""
        try:
            return task_id, self.pose_model(color_img)[0]
        except Exception as e:
            logger.error(f"Pose estimation error: {e}")
            return task_id, None

    def _task_completed(self, task_id: int, task_type: str, future):
        """Record one half ("seg" or "pose") of a task; emit when both exist.

        Runs inside executor worker threads via add_done_callback.
        """
        # Extract the model output outside the lock; a raised future is
        # recorded as a None result rather than crashing the callback.
        try:
            _, model_result = future.result()
        except Exception as e:
            logger.error(f"Task {task_id} {task_type} future error: {e}")
            model_result = None

        with self.lock:
            if task_id not in self.pending_tasks:
                return

            partial = self._partial_results.setdefault(task_id, {})
            partial[task_type] = model_result

            # Wait until BOTH halves have arrived, in whichever order.
            if "seg" not in partial or "pose" not in partial:
                return

            event, image_data = self.pending_tasks.pop(task_id)
            del self._partial_results[task_id]
            event.set()  # wake any waiter holding this task's event

            result = {
                "seg_result": partial["seg"],
                "pose_result": partial["pose"],
                "image_data": image_data,
            }
            try:
                self.result_queue.put((task_id, result), timeout=0.1)
            except queue.Full:
                logger.warning("Result queue full, result discarded")

            self._log_performance()

    def _result_collector(self):
        """Yield (task_id, result) pairs in completion order until shutdown.

        NOTE(review): iterating this generator competes with get_result() for
        items on result_queue — use one consumption style, not both. It is no
        longer submitted to the executor (that submit was a no-op).
        """
        while not self.shutdown_flag:
            try:
                yield self.result_queue.get(timeout=0.5)
            except queue.Empty:
                continue

    def _log_performance(self):
        """Count one processed task and log throughput roughly every 5 s."""
        self.processed_count += 1
        current_time = time.time()
        if current_time - self.last_log_time > 5.0:
            fps = self.processed_count / (current_time - self.last_log_time)
            logger.info(f"Processing FPS: {fps:.2f}, Queue: {self.task_queue.qsize()}")
            self.processed_count = 0
            self.last_log_time = current_time

    def get_result(self, block=True, timeout=None):
        """Return the next finished result dict, or None if none is available."""
        try:
            _, result = self.result_queue.get(block=block, timeout=timeout)
            return result
        except queue.Empty:
            return None

    def shutdown(self):
        """Stop the dispatcher loops and shut the thread pool down."""
        self.shutdown_flag = True
        self.executor.shutdown(wait=True)