#!/usr/bin/env python3

import os
import threading
import queue
import time
import cv2
import numpy as np
from ultralytics import YOLO
from datetime import datetime
from logger import setup_logger
from collections import deque
import weakref
from concurrent.futures import ThreadPoolExecutor

logger = setup_logger()

# Model paths and per-model NPU core assignment.
SEG_MODEL_PATH = "./merge_seg_rknn_model"
POSE_MODEL_PATH = "./merge_pose_rknn_model"
SEG_CORE = 0  # NPU core dedicated to the segmentation model
POSE_CORE = 1  # NPU core dedicated to the pose model

# Throughput tuning.
SEG_WORKER_THREADS = 2   # worker threads for the segmentation model
POSE_WORKER_THREADS = 2   # worker threads for the pose model
QUEUE_SIZE = 100         # per-worker task-queue capacity (larger = more tasks in flight)

class NPUWorker:
    """Runs one RKNN-backed YOLO model on a dedicated NPU core.

    A small pool of worker threads each loads its own model instance, then
    pulls inference jobs from a shared queue and delivers results through
    per-task callbacks.
    """

    def __init__(self, core_id, model_path, task_type, num_threads=2):
        """
        Args:
            core_id: index of the NPU core this worker is pinned to.
            model_path: path to the RKNN model.
            task_type: "segment" or "pose"; selects the YOLO task and the
                post-processing applied in _process_result().
            num_threads: worker threads to spawn (one model instance each).
        """
        self.core_id = core_id
        self.task_type = task_type
        self.model_path = model_path
        self.num_threads = num_threads

        # Shared job queue; producers use put_nowait so a full queue drops
        # work instead of blocking the caller.
        self.task_queue = queue.Queue(maxsize=QUEUE_SIZE)
        self.running = True

        # thread_id -> model instance (each worker thread owns its own model).
        self.models = {}
        # Set once the first thread finishes loading and warming up its model.
        self.model_ready = threading.Event()

        self.executor = ThreadPoolExecutor(
            max_workers=num_threads,
            thread_name_prefix=f"NPU-{core_id}-{task_type}")

        self._start_workers()

        logger.info(f"NPU核心 {core_id} 工作器已启动 ({task_type}, {num_threads}线程)")

    def _start_workers(self):
        """Submit one long-running _worker_loop per worker thread."""
        for i in range(self.num_threads):
            self.executor.submit(self._worker_loop, i)

    def _load_model(self, thread_id):
        """Load and warm up this thread's model instance.

        Returns:
            True on success, False on failure (the worker thread then exits).
        """
        try:
            # NOTE(review): environment variables are process-global, so
            # workers pinned to different cores may race on this mask —
            # confirm the RKNN runtime samples it only at model-load time.
            os.environ['RKNN_SERVER_CORE_MASK'] = str(1 << self.core_id)

            # Independent model instance per thread to avoid shared state.
            if self.task_type == "segment":
                model = YOLO(self.model_path, task='segment')
            else:  # pose
                model = YOLO(self.model_path, task='pose')

            self.models[thread_id] = model

            # Warm up on a dummy frame so the first real request is fast.
            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 开始模型预热...")
            dummy_img = np.zeros((640, 640, 3), dtype=np.uint8)
            _ = model.predict(dummy_img, save=False, verbose=False)

            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: {self.task_type}模型加载完成")

            # Any thread finishing unblocks wait_for_ready(); Event.set()
            # is idempotent, so later threads setting it again is harmless.
            self.model_ready.set()

            return True

        except Exception as e:
            logger.error(f"核心 {self.core_id}, 线程 {thread_id} 模型加载失败: {e}")
            return False

    def _worker_loop(self, thread_id):
        """Worker thread body: load the model, then drain the task queue."""
        if not self._load_model(thread_id):
            return

        model = self.models[thread_id]

        logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 开始处理任务...")

        while self.running:
            try:
                # Short timeout so self.running is re-checked frequently,
                # letting stop() terminate the loop promptly.
                task = self.task_queue.get(timeout=0.1)
                logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 获取到任务 {task['task_id']}")

                self._process_task(task, model, thread_id)

                # Mark the queue entry done for any join() waiters.
                self.task_queue.task_done()

            except queue.Empty:
                continue
            except Exception as e:
                logger.error(f"核心 {self.core_id}, 线程 {thread_id} 处理任务错误: {e}")

        logger.info(f"NPU核心 {self.core_id}, 线程 {thread_id} 工作线程已退出")

    def _process_task(self, task, model, thread_id):
        """Run inference for one task and deliver the result via its callback.

        On failure, the callback (if any) is still invoked with result=None so
        downstream bookkeeping can complete the task instead of leaking it.
        """
        try:
            img = task['img']
            task_id = task['task_id']
            result_callback = task['callback']
            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 开始处理任务 {task_id}")

            start_time = time.time()
            results = model.predict(img, save=False, verbose=False)
            inference_time = time.time() - start_time

            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 任务 {task_id} 推理完成，耗时 {inference_time:.3f}s")

            # Reduce the raw YOLO result to the piece callers consume.
            processed_result = self._process_result(results[0])

            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 任务 {task_id} 结果处理完成")

            if result_callback:
                result_callback(self.task_type, task_id, processed_result, inference_time)
                logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 任务 {task_id} 回调完成")

        except Exception as e:
            logger.error(f"核心 {self.core_id}, 线程 {thread_id} 任务处理失败: {e}")
            import traceback
            logger.error(f"详细错误: {traceback.format_exc()}")
            if task.get('callback'):
                task['callback'](self.task_type, task['task_id'], None, 0)

    def _process_result(self, result):
        """Extract the task-specific payload from a raw YOLO result.

        Returns:
            For "segment": the first mask polygon (masks.xy[0]), or None when
            no masks were produced. For "pose": the keypoints object, or None.
        """
        try:
            if self.task_type == "segment":
                if not hasattr(result, 'masks') or result.masks is None:
                    return None
                masks_xy = result.masks.xy
                if len(masks_xy) == 0:
                    return None
                return masks_xy[0]
            else:  # pose
                if not hasattr(result, 'keypoints') or result.keypoints is None:
                    return None
                return result.keypoints
        except Exception as e:
            logger.debug(f"结果处理错误 ({self.task_type}): {e}")
            return None

    def add_task(self, img, task_id, callback):
        """Enqueue an inference job without blocking.

        Returns:
            True when accepted, False when the queue is full (task dropped).
        """
        try:
            task = {
                'img': img,
                'task_id': task_id,
                'callback': callback
            }
            self.task_queue.put_nowait(task)
            logger.info(f"核心 {self.core_id}: 成功添加任务 {task_id}，队列大小: {self.task_queue.qsize()}")
            return True
        except queue.Full:
            logger.warning(f"核心 {self.core_id} 任务队列已满，丢弃任务 {task_id}")
            return False

    def wait_for_ready(self, timeout=30):
        """Block until at least one model is loaded; returns True if ready."""
        return self.model_ready.wait(timeout)

    def stop(self):
        """Stop all worker threads and wait for them to exit."""
        logger.info(f"正在停止NPU核心 {self.core_id} 工作器...")
        self.running = False

        # BUG FIX: Executor.shutdown() has no `timeout` parameter in any
        # Python version, so the previous shutdown(wait=True, timeout=5.0)
        # always raised TypeError and relied on the fallback. Workers poll
        # the queue with a 0.1s timeout and re-check self.running, so a
        # plain blocking shutdown returns promptly.
        self.executor.shutdown(wait=True)

        logger.info(f"NPU核心 {self.core_id} 工作器已停止")

class SegPose:
    """Non-blocking segmentation + pose pipeline.

    put() fans one image out to both NPU workers; each worker reports back
    through a callback, and the paired (seg, pose) outputs are queued for
    retrieval via get_result().
    """

    def __init__(self):
        logger.info("初始化简化SegPose...")

        # Fail fast when either model file is missing.
        if not os.path.exists(SEG_MODEL_PATH):
            logger.error(f"分割模型文件不存在: {SEG_MODEL_PATH}")
            raise FileNotFoundError(f"分割模型文件不存在: {SEG_MODEL_PATH}")

        if not os.path.exists(POSE_MODEL_PATH):
            logger.error(f"姿态模型文件不存在: {POSE_MODEL_PATH}")
            raise FileNotFoundError(f"姿态模型文件不存在: {POSE_MODEL_PATH}")

        logger.info(f"模型文件检查通过 - SEG: {SEG_MODEL_PATH}, POSE: {POSE_MODEL_PATH}")

        # Create the two NPU workers.
        try:
            logger.info("创建分割模型工作器...")
            self.seg_worker = NPUWorker(SEG_CORE, SEG_MODEL_PATH, "segment", SEG_WORKER_THREADS)
        except Exception as e:
            logger.error(f"创建分割模型工作器失败: {e}")
            raise RuntimeError(f"创建分割模型工作器失败: {e}")

        try:
            logger.info("创建姿态模型工作器...")
            self.pose_worker = NPUWorker(POSE_CORE, POSE_MODEL_PATH, "pose", POSE_WORKER_THREADS)
        except Exception as e:
            logger.error(f"创建姿态模型工作器失败: {e}")
            # Tear down the already-started seg worker before propagating.
            self.seg_worker.stop()
            raise RuntimeError(f"创建姿态模型工作器失败: {e}")

        # Wait (bounded) for each worker's first model to finish warm-up.
        logger.info("等待分割模型加载完成...")
        seg_ready = self.seg_worker.wait_for_ready(10)
        logger.info(f"分割模型等待结果: {seg_ready}")

        if not seg_ready:
            # Deliberately non-fatal: loading may still complete later.
            logger.error("分割模型加载超时，但可能已经加载完成，继续尝试...")

        logger.info("分割模型加载完成")

        logger.info("等待姿态模型加载完成...")
        pose_ready = self.pose_worker.wait_for_ready(10)
        logger.info(f"姿态模型等待结果: {pose_ready}")

        if not pose_ready:
            logger.error("姿态模型加载超时，但可能已经加载完成，继续尝试...")

        logger.info("姿态模型加载完成")

        logger.info("所有模型加载完成")

        # Monotonic task-id generator.
        self.task_counter = 0
        self.lock = threading.Lock()

        # task_id -> partial-result entry waiting for its pair.
        self.temp_results = {}
        self.temp_results_lock = threading.Lock()

        # Fully paired results ready for get_result().
        self.completed_results = queue.Queue(maxsize=200)

        # Counters for throughput/hit-rate statistics.
        self.stats = {
            'total_put': 0,
            'total_complete': 0,
            'total_get': 0,
            'get_hit': 0,
            'get_miss': 0
        }
        self.stats_lock = threading.Lock()

        logger.info("简化SegPose初始化完成")

    def put(self, image_color):
        """Submit an image to both models without blocking.

        Returns:
            The task id on success, or None when either worker queue is full
            (the task is then abandoned and its bookkeeping cleaned up).
        """
        with self.lock:
            self.task_counter += 1
            task_id = self.task_counter

        # Register the pending entry. BUG FIX: 'seg_done'/'pose_done' track
        # *arrival* separately from the payload, so a legitimate None result
        # (failed inference or no detections) still completes the task
        # instead of leaking the entry forever. 'ts' enables age-based
        # cleanup in clear_old_results().
        with self.temp_results_lock:
            self.temp_results[task_id] = {
                'seg': None, 'pose': None,
                'seg_done': False, 'pose_done': False,
                'ts': time.time(),
            }

        def result_callback(task_type, tid, result, inference_time):
            self._handle_result(tid, task_type, result)

        # Fan out to both NPUs.
        seg_ok = self.seg_worker.add_task(image_color, task_id, result_callback)
        pose_ok = self.pose_worker.add_task(image_color, task_id, result_callback)

        if not seg_ok or not pose_ok:
            # Submission failed on at least one side: drop the entry.
            with self.temp_results_lock:
                self.temp_results.pop(task_id, None)
            logger.warning(f"任务 {task_id} 提交失败")
            return None

        with self.stats_lock:
            self.stats['total_put'] += 1

        return task_id

    def _handle_result(self, task_id, task_type, result):
        """Callback target: record one model's result; pair when both arrived.

        `result` may be None (inference failure or no detections); arrival is
        tracked via the *_done flags so the task still completes either way.
        """
        logger.info(f"收到结果回调: 任务{task_id}, 类型{task_type}, 结果是否为空: {result is None}")

        with self.temp_results_lock:
            entry = self.temp_results.get(task_id)
            if entry is None:
                logger.warning(f"收到未知任务 {task_id} 的结果")
                return

            if task_type == "segment":
                entry['seg'] = result
                entry['seg_done'] = True
            elif task_type == "pose":
                entry['pose'] = result
                entry['pose_done'] = True

            seg_ready = entry['seg_done']
            pose_ready = entry['pose_done']

            logger.info(f"任务 {task_id} 状态检查: seg_ready={seg_ready}, pose_ready={pose_ready}")

            if seg_ready and pose_ready:
                logger.info(f"任务 {task_id} 完成，seg和pose结果都已收到")
                # Both sides arrived: hand the pair to the completion queue.
                try:
                    result_data = {
                        'task_id': task_id,
                        'seg_result': entry['seg'],
                        'pose_result': entry['pose']
                    }

                    self.completed_results.put_nowait(result_data)
                    logger.info(f"任务 {task_id} 结果已成功放入完成队列，队列大小: {self.completed_results.qsize()}")

                    self.temp_results.pop(task_id, None)

                    with self.stats_lock:
                        self.stats['total_complete'] += 1

                except queue.Full:
                    logger.warning(f"完成结果队列已满，丢弃任务 {task_id} 的结果")
                    self.temp_results.pop(task_id, None)
            else:
                logger.info(f"任务 {task_id} 部分完成: seg={seg_ready}, pose={pose_ready}")

    def get_result(self):
        """Pop one completed result without blocking.

        Returns:
            (seg_result, pose_result) — either element may be None when that
            model produced nothing; (None, None) also signals "no result yet".
        """
        with self.stats_lock:
            self.stats['total_get'] += 1

        try:
            result = self.completed_results.get_nowait()

            logger.info(f"成功获取到完成的结果: 任务{result['task_id']}")

            with self.stats_lock:
                self.stats['get_hit'] += 1

            return result['seg_result'], result['pose_result']

        except queue.Empty:
            with self.stats_lock:
                self.stats['get_miss'] += 1
            return None, None

    def get_pending_count(self):
        """Return counts of in-flight and completed-but-unread tasks."""
        with self.temp_results_lock:
            processing = len(self.temp_results)

        completed_pending = self.completed_results.qsize()

        return {
            'processing': processing,
            'completed_pending': completed_pending,
            'total_pending': processing + completed_pending
        }

    def get_stats(self):
        """Return a snapshot of counters, pending counts, and hit rate."""
        with self.stats_lock:
            stats = self.stats.copy()

        pending = self.get_pending_count()
        stats.update(pending)

        # get_result() hit rate; 0.0 before any call.
        if stats['total_get'] > 0:
            stats['hit_rate'] = stats['get_hit'] / stats['total_get']
        else:
            stats['hit_rate'] = 0.0

        return stats

    def clear_old_results(self, max_age_seconds=60):
        """Drop in-flight entries older than max_age_seconds.

        BUG FIX: the previous implementation built an unused list and always
        returned 0. Entries now carry a 'ts' submission timestamp (set in
        put()), so genuinely stale orphans can be reaped.

        Returns:
            Number of entries removed.
        """
        cutoff = time.time() - max_age_seconds

        with self.temp_results_lock:
            stale = [tid for tid, entry in self.temp_results.items()
                     if entry.get('ts', 0) < cutoff]
            for tid in stale:
                self.temp_results.pop(tid, None)

        return len(stale)

    def shutdown(self):
        """Log final statistics and stop both NPU workers."""
        logger.info("正在关闭SegPose...")

        stats = self.get_stats()
        logger.info(f"最终统计: {stats}")

        self.seg_worker.stop()
        self.pose_worker.stop()

        logger.info("SegPose已关闭")

if __name__ == "__main__":
    # Smoke test: build the pipeline, flood it with tasks, then poll results.
    logger.info("开始SegPose测试...")
    
    seg_pose = SegPose()
    
    # Test configuration: directory of color frames to push through the pipeline.
    test_dir = "./data/Color1"
    image_files = [f for f in os.listdir(test_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
    
    if not image_files:
        logger.error(f"在目录 {test_dir} 中未找到图片文件")
        seg_pose.shutdown()
        exit(1)
    
    logger.info(f"找到 {len(image_files)} 张测试图片")
    
    # Pre-load images into memory so submission speed isn't bounded by disk I/O.
    test_images = []
    for img_file in image_files[300:400]:  # NOTE(review): original comment said "first 10" but the slice takes files 300-399 — confirm intended range
        img_path = os.path.join(test_dir, img_file)
        img = cv2.imread(img_path)
        if img is not None:
            test_images.append((img_file, img))
    
    logger.info(f"预加载了 {len(test_images)} 张图片")
    
    # Phase 1: submit tasks as fast as possible (put() is non-blocking).
    logger.info("开始快速提交任务...")
    submitted_tasks = []
    
    start_time = time.time()
    for i, (img_file, img) in enumerate(test_images * 3):  # repeat the set 3x to increase load
        task_id = seg_pose.put(img)
        if task_id:
            submitted_tasks.append(task_id)
        
        if (i + 1) % 10 == 0:
            pending = seg_pose.get_pending_count()
            logger.info(f"已提交 {i+1} 个任务，待处理: {pending}")
    
    submit_time = time.time() - start_time
    logger.info(f"提交完成，用时 {submit_time:.2f}s，提交速度: {len(submitted_tasks)/submit_time:.1f} 任务/秒")
    
    # Phase 2: poll for completed results without blocking.
    # NOTE(review): only results where BOTH outputs are non-None are counted;
    # a task whose inference fails or detects nothing is never collected, so
    # this loop can spin forever — confirm every test frame yields detections.
    logger.info("开始获取结果...")
    collected_results = []
    last_stats_time = time.time()
    
    while len(collected_results) < len(submitted_tasks):
        seg_result, pose_result = seg_pose.get_result()
        
        if seg_result is not None and pose_result is not None:
            collected_results.append((seg_result, pose_result))
            
            # Progress report every 10 collected results.
            if len(collected_results) % 10 == 0:
                pending = seg_pose.get_pending_count()
                logger.info(f"已收集 {len(collected_results)} 个结果，待处理: {pending}")
        else:
            # Nothing ready yet; sleep briefly to avoid a busy spin.
            time.sleep(0.01)
        
        # Emit aggregate statistics roughly once per second.
        if time.time() - last_stats_time > 1.0:
            stats = seg_pose.get_stats()
            logger.info(f"统计: {stats}")
            last_stats_time = time.time()
    
    total_time = time.time() - start_time
    logger.info(f"测试完成！")
    logger.info(f"总耗时: {total_time:.2f}s")
    logger.info(f"平均处理速度: {len(collected_results)/total_time:.1f} 张/秒")
    
    # Final statistics before shutdown.
    final_stats = seg_pose.get_stats()
    logger.info(f"最终统计: {final_stats}")
    
    seg_pose.shutdown()