#!/usr/bin/env python3

import functools
import os
import queue
import threading
import time
import weakref
from collections import deque
from datetime import datetime

import cv2
import numpy as np
from ultralytics import YOLO

from logger import setup_logger

logger = setup_logger()

# Model paths and NPU core assignment: each model gets a dedicated core so
# the segmentation and pose models can run inference fully in parallel.
SEG_MODEL_PATH = "./merge_yolo11n_seg_rknn_model"
POSE_MODEL_PATH = "./merge_yolo11n_pose_rknn_model"
SEG_CORE = 0  # dedicated core for the segmentation model
POSE_CORE = 1  # dedicated core for the keypoint (pose) model

class OptimizedNPUWorker:
    """Run one YOLO model pinned to a single NPU core on its own thread.

    Tasks are dicts ``{'img', 'result_queue', 'task_id'}`` pulled from
    ``task_queue``; each result (or error record) is pushed back into the
    task's own ``result_queue`` without ever blocking the worker thread.
    """
    def __init__(self, core_id, model_path, task_type):
        """
        :param core_id: NPU core index; selects the core via a bit mask.
        :param model_path: path of the RKNN-exported YOLO model to load.
        :param task_type: "segment" or "pose" — controls both model loading
            and result post-processing.
        """
        self.core_id = core_id
        self.task_type = task_type
        self.model_path = model_path
        
        # A larger queue reduces submit-side blocking under bursts.
        self.task_queue = queue.Queue(maxsize=20)
        self.running = True
        self.model = None  # loaded lazily on the worker thread
        
        # Performance counters; every access goes through stats_lock.
        self.stats = {
            'total_tasks': 0,
            'successful_tasks': 0,
            'failed_tasks': 0,
            'queue_full_count': 0,
            'avg_inference_time': 0.0,
            'last_reset': time.time()
        }
        self.stats_lock = threading.Lock()
        
        # Pre-allocated pool intended to reduce GC pressure.
        # NOTE(review): never used anywhere in this class — confirm intent
        # before removing.
        self.result_pool = deque(maxlen=50)
        
        # Daemon worker thread: loads the model, then serves the queue.
        self.thread = threading.Thread(target=self._worker_loop, name=f"NPU-{core_id}-{task_type}")
        self.thread.daemon = True
        self.thread.start()
        
        logger.info(f"优化NPU核心 {core_id} 工作器已启动 ({task_type})")
    
    def _load_model(self):
        """Load and warm up the YOLO model on this worker's NPU core.

        Runs on the worker thread. Returns True on success; on failure logs
        the error and returns False (the worker loop then exits).
        """
        try:
            # Bind the RKNN runtime to the requested core via a bit mask.
            # NOTE(review): the env var is process-global, so two workers
            # loading concurrently could race on it — confirm the RKNN
            # runtime reads it at model-load time as intended.
            os.environ['RKNN_SERVER_CORE_MASK'] = str(1 << self.core_id)
            
            # task= disambiguates the exported model's head type.
            if self.task_type == "segment":
                self.model = YOLO(self.model_path, task='segment')
            else:  # pose
                self.model = YOLO(self.model_path, task='pose')
            
            # Warm-up: one dummy inference so the first real frame is fast.
            logger.info(f"核心 {self.core_id}: 开始模型预热...")
            dummy_img = np.zeros((640, 640, 3), dtype=np.uint8)
            _ = self.model.predict(dummy_img, save=False, verbose=False)
            
            logger.info(f"核心 {self.core_id}: {self.task_type}模型加载并预热完成")
            return True
        except Exception as e:
            logger.error(f"核心 {self.core_id} 模型加载失败: {e}")
            return False
    
    def _worker_loop(self):
        """Main loop of the worker thread: load model, then drain tasks."""
        # The model must be loaded on this thread so the core binding applies.
        if not self._load_model():
            return
        
        # Micro-batching scaffold; max_batch_size=1 keeps it single-image
        # for now but preserves the architecture for future batching.
        batch_buffer = []
        last_batch_time = time.time()
        batch_timeout = 0.005  # flush a partial batch after 5 ms
        max_batch_size = 1
        
        while self.running:
            try:
                # Near-non-blocking poll of the task queue.
                try:
                    task = self.task_queue.get(timeout=0.001)
                    batch_buffer.append(task)
                except queue.Empty:
                    pass
                
                # Flush when the batch is full or the timeout elapsed.
                current_time = time.time()
                should_process = (
                    len(batch_buffer) >= max_batch_size or
                    (len(batch_buffer) > 0 and (current_time - last_batch_time) > batch_timeout)
                )
                
                if should_process and batch_buffer:
                    self._process_batch(batch_buffer)
                    batch_buffer.clear()
                    last_batch_time = current_time
                
                # Idle: sleep briefly to keep CPU usage down.
                if not batch_buffer:
                    time.sleep(0.0001)  # 0.1 ms
                    
            except Exception as e:
                logger.error(f"核心 {self.core_id} 工作循环错误: {e}")
                # Report the failure to every caller waiting on this batch.
                for task in batch_buffer:
                    self._send_error_result(task, str(e))
                batch_buffer.clear()
        
        logger.info(f"NPU核心 {self.core_id} 工作线程已退出")
    
    def _process_batch(self, tasks):
        """Process a batch of tasks, isolating per-task failures."""
        for task in tasks:
            try:
                self._process_single_task(task)
            except Exception as e:
                logger.error(f"核心 {self.core_id} 处理任务错误: {e}")
                self._send_error_result(task, str(e))
    
    def _process_single_task(self, task):
        """Run inference for one task and push the result to its queue."""
        img = task['img']
        result_queue = task['result_queue']
        task_id = task['task_id']
        
        # Timed inference.
        start_time = time.time()
        results = self.model.predict(img, save=False, verbose=False)
        inference_time = time.time() - start_time
        
        # Reduce the raw ultralytics result to what callers need.
        processed_result = self._process_result(results[0])
        
        # Track latency/success counters.
        self._update_stats(inference_time, processed_result is not None)
        
        result_data = {
            'task_type': self.task_type,
            'task_id': task_id,
            'result': processed_result,
            'inference_time': inference_time,
            'success': processed_result is not None
        }
        
        # Non-blocking delivery: never stall the worker on a slow consumer.
        try:
            result_queue.put(result_data, timeout=0.001)
        except queue.Full:
            logger.warning(f"核心 {self.core_id} 结果队列已满，丢弃结果")
    
    def _send_error_result(self, task, error_msg):
        """Push a failure record to the task's result queue (best effort)."""
        try:
            task['result_queue'].put({
                'task_type': self.task_type,
                'task_id': task['task_id'],
                'result': None,
                'inference_time': 0,
                'success': False,
                'error': error_msg
            }, timeout=0.001)
        except queue.Full:
            # Caller's queue is full; drop the error record silently rather
            # than block the worker. (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            pass
    
    def _process_result(self, result):
        """Extract the task-specific payload from an ultralytics result.

        Returns None when the frame produced no usable detection.
        """
        try:
            if self.task_type == "segment":
                # Fast-path rejection when no masks were produced.
                if not hasattr(result, 'masks') or result.masks is None:
                    return None
                
                masks_xy = result.masks.xy
                if len(masks_xy) == 0:
                    return None
                
                # Contour points of the first mask only.
                return masks_xy[0]
                
            else:  # pose
                if not hasattr(result, 'keypoints') or result.keypoints is None:
                    return None
                return result.keypoints
                
        except Exception as e:
            logger.debug(f"结果处理错误 ({self.task_type}): {e}")
            return None
    
    def _update_stats(self, inference_time, success):
        """Update task counters and the running-average inference time."""
        with self.stats_lock:
            self.stats['total_tasks'] += 1
            if success:
                self.stats['successful_tasks'] += 1
                # Incremental mean over successful tasks only.
                n = self.stats['successful_tasks']
                self.stats['avg_inference_time'] = (
                    self.stats['avg_inference_time'] * (n-1) + inference_time
                ) / n
            else:
                self.stats['failed_tasks'] += 1
    
    def add_task(self, img, result_queue, task_id):
        """Submit a task; returns False (and counts it) if the queue is full.

        :param img: input image (numpy array).
        :param result_queue: queue.Queue the result record is pushed into.
        :param task_id: caller-chosen id echoed back in the result record.
        """
        try:
            self.task_queue.put({
                'img': img,
                'result_queue': result_queue,
                'task_id': task_id
            }, timeout=0.001)  # short timeout keeps the caller responsive
            return True
        except queue.Full:
            with self.stats_lock:
                self.stats['queue_full_count'] += 1
            return False
    
    def get_stats(self):
        """Return a snapshot copy of the performance counters."""
        with self.stats_lock:
            return self.stats.copy()
    
    def reset_stats(self):
        """Zero all counters and restart the reset timestamp."""
        with self.stats_lock:
            self.stats.update({
                'total_tasks': 0,
                'successful_tasks': 0,
                'failed_tasks': 0,
                'queue_full_count': 0,
                'avg_inference_time': 0.0,
                'last_reset': time.time()
            })
    
    def stop(self):
        """Stop the worker loop and join the thread (bounded wait)."""
        self.running = False
        if self.thread.is_alive():
            self.thread.join(timeout=2.0)  # generous bound for loop exit

class OptimizedSegPose:
    """Parallel segmentation + pose estimation across two dedicated NPU cores.

    Owns one OptimizedNPUWorker per model; get_result() fans one frame out
    to both workers and joins the two results under a shared deadline.
    """
    def __init__(self):
        logger.info("初始化优化SegPose类...")
        
        # One worker per NPU core; both models load/warm up in parallel.
        self.seg_worker = OptimizedNPUWorker(SEG_CORE, SEG_MODEL_PATH, "segment")
        self.pose_worker = OptimizedNPUWorker(POSE_CORE, POSE_MODEL_PATH, "pose")
        
        # Give both workers time to finish loading and warming up.
        # NOTE(review): this is a fixed 10 s sleep, not a readiness
        # handshake — confirm whether workers could signal completion.
        time.sleep(10)
        
        self.task_counter = 0  # source of monotonically increasing task ids
        self.lock = threading.Lock()  # guards task_counter
        
        # Request-level statistics, guarded by global_stats_lock.
        self.global_stats = {
            'total_requests': 0,
            'successful_requests': 0,
            'failed_requests': 0,
            'timeout_requests': 0,
            'avg_total_time': 0.0,
            'start_time': time.time()
        }
        self.global_stats_lock = threading.Lock()
        
        # Cache pool intended to cut object-creation overhead.
        # NOTE(review): never read or written elsewhere in this class.
        self.result_cache = queue.Queue(maxsize=100)
        
        logger.info("优化SegPose初始化完成")
    
    def get_result(self, img):
        """
        Run segmentation and pose estimation on one frame in parallel.
        :param img: input image (numpy array)
        :return: (seg_result, pose_result) — mask contour points and
                 keypoints; either may be None on failure or timeout.
        """
        start_time = time.time()
        
        with self.lock:
            self.task_counter += 1
            task_id = self.task_counter
        
        # Per-request result queue both workers push into; small to save memory.
        result_queue = queue.Queue(maxsize=5)
        
        # Submit the same frame to both NPUs.
        seg_submitted = self.seg_worker.add_task(img, result_queue, task_id)
        pose_submitted = self.pose_worker.add_task(img, result_queue, task_id)
        
        if not seg_submitted or not pose_submitted:
            logger.warning(f"任务提交失败 - seg: {seg_submitted}, pose: {pose_submitted}")
            self._update_global_stats(time.time() - start_time, False, 'submission_failed')
            return None, None
        
        # Join both results under a single deadline.
        seg_result, pose_result = self._collect_results_fast(result_queue, task_id, start_time)
        
        total_time = time.time() - start_time
        success = seg_result is not None and pose_result is not None
        # NOTE(review): every unsuccessful request is classified as
        # 'timeout' here, even when a worker returned a failed result.
        self._update_global_stats(total_time, success, 'timeout' if not success else 'success')
        
        return self.result_to_better(seg_result, pose_result)
    
    def _collect_results_fast(self, result_queue, task_id, start_time):
        """Collect both worker results from the queue under one deadline.

        Returns (seg_result, pose_result); a missing/failed side is None.
        """
        seg_result = None
        pose_result = None
        results_received = 0
        timeout = 5.0  # overall deadline (seconds) for both results
        
        # Deadline is measured from request start, not collection start.
        deadline = start_time + timeout
        
        while results_received < 2 and time.time() < deadline:
            remaining_time = deadline - time.time()
            if remaining_time <= 0:
                break
                
            try:
                # Poll in small slices so we never overshoot the deadline.
                wait_time = min(0.01, remaining_time)
                result = result_queue.get(timeout=wait_time)
                
                # Drop stale results from an earlier request id.
                if result['task_id'] != task_id:
                    continue
                
                if result['task_type'] == 'segment' and seg_result is None:
                    seg_result = result['result'] if result['success'] else None
                    if result['success']:
                        logger.debug(f"分割推理完成，耗时: {result['inference_time']*1000:.1f}ms")
                    
                elif result['task_type'] == 'pose' and pose_result is None:
                    pose_result = result['result'] if result['success'] else None
                    if result['success']:
                        logger.debug(f"姿态推理完成，耗时: {result['inference_time']*1000:.1f}ms")
                
                results_received += 1
                
            except queue.Empty:
                continue
            except Exception as e:
                logger.error(f"结果收集错误: {e}")
                break
        
        if results_received < 2:
            logger.warning(f"推理超时或失败，仅收到 {results_received}/2 个结果")
        
        return seg_result, pose_result
    
    def _update_global_stats(self, total_time, success, status):
        """Update request counters and the running-average request time."""
        with self.global_stats_lock:
            self.global_stats['total_requests'] += 1
            
            if success:
                self.global_stats['successful_requests'] += 1
                # Incremental mean over successful requests only.
                n = self.global_stats['successful_requests']
                self.global_stats['avg_total_time'] = (
                    self.global_stats['avg_total_time'] * (n-1) + total_time
                ) / n
            else:
                if status == 'timeout':
                    self.global_stats['timeout_requests'] += 1
                else:
                    self.global_stats['failed_requests'] += 1
    
    def result_to_better(self, seg_result, pose_result):
        """Pass-through hook kept for interface compatibility."""
        return seg_result, pose_result
    
    def get_result_with_stats(self, img):
        """
        Run get_result() and attach detailed per-worker/global statistics.
        :param img: input image
        :return: (seg_result, pose_result, stats)
        """
        start_time = time.time()
        seg_result, pose_result = self.get_result(img)
        total_time = time.time() - start_time
        
        # Snapshot per-worker counters for the report.
        seg_stats = self.seg_worker.get_stats()
        pose_stats = self.pose_worker.get_stats()
        
        stats = {
            'total_time': total_time,
            'seg_success': seg_result is not None,
            'pose_success': pose_result is not None,
            'timestamp': datetime.now().isoformat(),
            'seg_worker_stats': seg_stats,
            'pose_worker_stats': pose_stats,
            'global_stats': self.get_global_stats()
        }
        
        return seg_result, pose_result, stats
    
    def get_global_stats(self):
        """Return a snapshot of global stats with derived uptime/rates."""
        with self.global_stats_lock:
            stats = self.global_stats.copy()
            stats['uptime'] = time.time() - stats['start_time']
            if stats['total_requests'] > 0:
                stats['success_rate'] = stats['successful_requests'] / stats['total_requests']
                stats['avg_fps'] = stats['successful_requests'] / stats['uptime']
            return stats
    
    def get_comprehensive_stats(self):
        """Return a combined report: global plus both worker stat snapshots."""
        return {
            'global': self.get_global_stats(),
            'seg_worker': self.seg_worker.get_stats(),
            'pose_worker': self.pose_worker.get_stats(),
            'timestamp': time.time()
        }
    
    def reset_all_stats(self):
        """Zero global and per-worker statistics."""
        with self.global_stats_lock:
            self.global_stats.update({
                'total_requests': 0,
                'successful_requests': 0,
                'failed_requests': 0,
                'timeout_requests': 0,
                'avg_total_time': 0.0,
                'start_time': time.time()
            })
        
        self.seg_worker.reset_stats()
        self.pose_worker.reset_stats()
    
    def shutdown(self):
        """Log final statistics and stop both NPU workers."""
        logger.info("正在关闭优化SegPose...")
        
        # Emit a final summary before tearing the workers down.
        final_stats = self.get_comprehensive_stats()
        logger.info(f"最终统计 - 总请求: {final_stats['global']['total_requests']}, "
                   f"成功: {final_stats['global']['successful_requests']}, "
                   f"成功率: {final_stats['global'].get('success_rate', 0)*100:.1f}%")
        
        # Stop workers (bounded join inside each).
        self.seg_worker.stop()
        self.pose_worker.stop()
        
        logger.info("优化SegPose已关闭")

# 为了保持向后兼容，提供原有的类名
class SegPose(OptimizedSegPose):
    """Deprecated alias: kept so existing imports of ``SegPose`` keep working."""
    pass

# 性能监控装饰器
def performance_monitor(func):
    """Decorator that logs *func*'s wall-clock duration.

    Logs at debug level on success; on failure logs at error level and
    re-raises the original exception. Works on free functions and methods
    alike (the original required a leading positional argument, so it
    crashed on zero-argument callables). ``functools.wraps`` preserves
    ``__name__``/``__doc__`` so the log lines and introspection stay
    meaningful.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        try:
            result = func(*args, **kwargs)
            end_time = time.time()
            logger.debug(f"{func.__name__} 耗时: {(end_time - start_time)*1000:.2f}ms")
            return result
        except Exception as e:
            end_time = time.time()
            logger.error(f"{func.__name__} 执行失败，耗时: {(end_time - start_time)*1000:.2f}ms, 错误: {e}")
            raise
    return wrapper

if __name__ == "__main__":
    # Smoke test: run a handful of inferences and report timing/statistics.
    logger.info("开始SegPose优化版本测试...")

    pipeline = OptimizedSegPose()

    # Synthetic 640x640 RGB frame as input.
    frame = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)

    # Repeated inference to exercise both workers.
    iterations = 10
    t0 = time.time()

    for idx in range(1, iterations + 1):
        seg_out, pose_out = pipeline.get_result(frame)
        logger.info(f"测试 {idx}: seg={seg_out is not None}, pose={pose_out is not None}")

    elapsed = time.time() - t0
    logger.info(f"完成 {iterations} 次推理，总耗时: {elapsed:.2f}s, 平均: {elapsed/iterations:.3f}s")

    # Final statistics report.
    logger.info(f"统计信息: {pipeline.get_comprehensive_stats()}")

    pipeline.shutdown()