from ultralytics import YOLO
import numpy as np
from typing import Optional
import queue
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from collections import deque

from utils import *

logger = setup_logger()

SEG_MODEL_PATH = "./merge_yolo11n_seg_rknn_model"
POSE_MODEL_PATH = "./merge_yolo11n_pose_rknn_model"

class ImageData:
    """Container for one captured frame: timestamps plus color/depth images.

    All fields start empty/zero; producers fill them in before submitting
    the instance to SegPose.put().
    """
    def __init__(self):
        # Capture time as a UNIX timestamp in seconds.
        self.timestamp: float = 0.0
        # Human-readable rendering of the capture time.
        self.readable_timestamp: str = ""
        # Color frame; None until a frame is assigned.
        # (Was annotated plain np.ndarray while holding None — fixed to Optional.)
        self.color_img: Optional[np.ndarray] = None
        # Depth frame aligned with color_img; None until assigned.
        self.depth_img: Optional[np.ndarray] = None
        # Optional on-disk path of the saved color frame.
        self.color_path: Optional[str] = None

class ModelProcessor:
    """Shared wrapper around the segmentation and pose models.

    Each model is loaded exactly once so every worker thread can reuse the
    same instance; a per-model lock serializes inference in case the NPU
    cannot run concurrent jobs.
    """
    def __init__(self):
        logger.info("Loading segmentation model...")
        self.seg_model = YOLO(SEG_MODEL_PATH, task="segment")
        logger.info("Loading pose model...")
        self.pose_model = YOLO(POSE_MODEL_PATH, task="pose")
        logger.info("Models loaded successfully")

        # One lock per model; remove them if the NPU supports concurrency.
        self.seg_lock = threading.Lock()
        self.pose_lock = threading.Lock()

    def process_segmentation(self, color_img):
        """Run segmentation on a frame; return the first mask polygon or None."""
        try:
            # Lock scope covers inference and post-processing, matching the
            # serialization needed when the NPU cannot run concurrent jobs.
            with self.seg_lock:
                prediction = self.seg_model(color_img)
                return self._process_seg_result(prediction[0])
        except Exception as e:
            logger.error(f"Segmentation error: {e}")
            return None

    def process_pose(self, color_img):
        """Run pose estimation on a frame; return its keypoints or None."""
        try:
            with self.pose_lock:
                prediction = self.pose_model(color_img)
                return self._process_pose_result(prediction[0])
        except Exception as e:
            logger.error(f"Pose estimation error: {e}")
            return None

    def _process_seg_result(self, result):
        """Extract the first mask polygon from one segmentation result, or None."""
        try:
            masks = getattr(result, 'masks', None)
            if masks is None:
                return None
            polygons = masks.xy
            # No detected instances -> nothing to return.
            if not polygons:
                return None
            return polygons[0]
        except Exception as e:
            logger.error(f"Seg result processing error: {e}")
            return None

    def _process_pose_result(self, result):
        """Extract the keypoints object from one pose result, or None."""
        try:
            return getattr(result, 'keypoints', None)
        except Exception as e:
            logger.error(f"Pose result processing error: {e}")
            return None

class SegPose:
    """Threaded pipeline that runs segmentation and pose estimation in parallel.

    Frames are submitted with put(); dispatcher threads pull them in small
    batches, fan each frame out to the segmentation and pose thread pools
    simultaneously, and publish a combined result dict on an output queue
    that callers drain via get_result().
    """
    def __init__(self, seg_workers=2, pose_workers=2, max_queue_size=50):
        """
        Args:
            seg_workers: segmentation worker threads (1-2 recommended for NPU).
            pose_workers: pose worker threads (1-2 recommended for NPU).
            max_queue_size: capacity of both the input and result queues.
        """
        self.shutdown_flag = False
        self.counter_lock = threading.Lock()

        # Shared processor so each model is loaded exactly once.
        self.model_processor = ModelProcessor()

        # Keep the pools small to avoid contending for the NPU.
        self.seg_executor = ThreadPoolExecutor(max_workers=seg_workers, thread_name_prefix="SegWorker")
        self.pose_executor = ThreadPoolExecutor(max_workers=pose_workers, thread_name_prefix="PoseWorker")

        # Task bookkeeping: bounded queues plus a monotonically increasing id.
        self.input_queue = queue.Queue(maxsize=max_queue_size)
        self.result_queue = queue.Queue(maxsize=max_queue_size)
        self.task_counter = 0

        # Dispatcher threads that batch tasks and submit them to both pools.
        self.processing_threads = []
        for i in range(min(4, max(seg_workers, pose_workers))):
            thread = threading.Thread(target=self._batch_process_tasks,
                                      name=f"BatchProcessor-{i}", daemon=True)
            thread.start()
            self.processing_threads.append(thread)

        logger.info(f"SegPose initialized with {seg_workers} seg workers, {pose_workers} pose workers")

    def put(self, image_data: "ImageData"):
        """Enqueue one frame for processing.

        Returns:
            The assigned task id, or -1 if the input queue is full.
            Note: the counter advances even on rejection, so ids are unique
            but not necessarily contiguous among accepted tasks.
        """
        with self.counter_lock:
            task_id = self.task_counter
            self.task_counter += 1

        try:
            self.input_queue.put((task_id, image_data), block=False)
            return task_id
        except queue.Full:
            return -1

    def _drain_batch(self):
        """Pull up to four pending tasks off the input queue.

        The dynamic batch size (current backlog + 1, capped at 4) means an
        idle pipeline never waits long to fill a batch.
        """
        batch_tasks = []
        batch_size = min(4, self.input_queue.qsize() + 1)
        for _ in range(batch_size):
            try:
                batch_tasks.append(self.input_queue.get(timeout=0.1))
            except queue.Empty:
                break
        return batch_tasks

    def _publish_result(self, result):
        """Put a result dict on the output queue, evicting the oldest entry if full."""
        try:
            self.result_queue.put(result, block=False)
        except queue.Full:
            try:
                # Make room by dropping the oldest result, then retry once.
                self.result_queue.get(block=False)
                self.result_queue.put(result, block=False)
            except (queue.Empty, queue.Full):
                # Empty: a consumer drained the queue between our two calls;
                # Full: a concurrent producer refilled it. Either way the
                # original code's intent is best-effort, so just drop.
                # (Original caught only queue.Full, letting queue.Empty escape
                # and mislabel the task as a processing error.)
                logger.warning(f"Result queue full, dropping task {result['task_id']}")

    def _batch_process_tasks(self):
        """Dispatcher loop: batch input tasks and fan each out to both pools."""
        while not self.shutdown_flag:
            try:
                batch_tasks = self._drain_batch()
                if not batch_tasks:
                    continue

                # Submit every job before collecting any result so the
                # segmentation and pose pools run concurrently.
                futures = []
                for task_id, image_data in batch_tasks:
                    seg_future = self.seg_executor.submit(
                        self.model_processor.process_segmentation,
                        image_data.color_img
                    )
                    pose_future = self.pose_executor.submit(
                        self.model_processor.process_pose,
                        image_data.color_img
                    )
                    futures.append((task_id, image_data, seg_future, pose_future))

                # Collect results; completion order across tasks is not enforced.
                for task_id, image_data, seg_future, pose_future in futures:
                    try:
                        seg_result = seg_future.result(timeout=5.0)
                        pose_result = pose_future.result(timeout=5.0)
                        self._publish_result({
                            "task_id": task_id,
                            "seg_result": seg_result,
                            "pose_result": pose_result,
                            "image_data": image_data,
                        })
                    except Exception as e:
                        logger.error(f"Error processing task {task_id}: {e}")
                        # Publish a placeholder so consumers still see the task.
                        try:
                            self.result_queue.put({
                                "task_id": task_id,
                                "seg_result": None,
                                "pose_result": None,
                                "image_data": image_data,
                            }, block=False)
                        except queue.Full:
                            pass

            except Exception as e:
                logger.error(f"Batch processing error: {e}")
                time.sleep(0.1)

    def get_result(self, block=False, timeout=None):
        """Fetch the next finished result dict, or None when nothing is ready.

        Args:
            block: wait for a result when True.
            timeout: max seconds to wait when blocking (None = wait forever).
        """
        try:
            if block:
                return self.result_queue.get(timeout=timeout)
            return self.result_queue.get_nowait()
        except queue.Empty:
            return None

    def has_result(self):
        """Return True if at least one result is currently available."""
        return not self.result_queue.empty()

    def pending_count(self):
        """Return the number of tasks waiting in the input queue."""
        return self.input_queue.qsize()

    def get_queue_status(self):
        """Return current input/result queue depths and the shared capacity."""
        return {
            "input_queue": self.input_queue.qsize(),
            "result_queue": self.result_queue.qsize(),
            "max_size": self.input_queue.maxsize
        }

    def shutdown(self):
        """Stop the dispatcher threads and shut down both executor pools."""
        logger.info("Shutting down SegPose processor...")
        self.shutdown_flag = True

        # Dispatchers poll shutdown_flag with a 0.1 s queue timeout, so a
        # short join is sufficient (they are daemons regardless).
        for thread in self.processing_threads:
            thread.join(timeout=2.0)

        self.seg_executor.shutdown(wait=True)
        self.pose_executor.shutdown(wait=True)
        logger.info("SegPose processor shutdown complete")

# 性能测试函数
def benchmark_segpose():
    """Throughput smoke test: push synthetic frames through SegPose, report FPS."""
    logger.info("Starting SegPose benchmark...")

    # One synthetic 640x480 color frame, reused for every submitted task.
    frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)

    processor = SegPose(seg_workers=2, pose_workers=2, max_queue_size=20)

    task_count = 10
    start_time = time.time()

    # Submit all tasks up front; rejected submissions are logged, not retried.
    for i in range(task_count):
        sample = ImageData()
        sample.color_img = frame
        sample.timestamp = time.time()
        if processor.put(sample) == -1:
            logger.warning(f"Task {i} rejected")

    # Drain results until every task is back or the 30 s budget runs out.
    results_collected = 0
    while results_collected < task_count:
        if time.time() - start_time >= 30:
            break
        if processor.get_result(block=True, timeout=1.0):
            results_collected += 1
            logger.info(f"Collected result {results_collected}/{task_count}")

    total_time = time.time() - start_time
    fps = results_collected / total_time

    logger.info(f"Benchmark complete: {results_collected}/{task_count} tasks in {total_time:.2f}s")
    logger.info(f"Average FPS: {fps:.2f}")

    processor.shutdown()
    return fps

if __name__ == "__main__":
    benchmark_segpose()