#!/usr/bin/env python3

import os
import threading
import queue
import time
import cv2
import numpy as np
from ultralytics import YOLO
from datetime import datetime
from logger import setup_logger
from collections import deque
import weakref
from concurrent.futures import ThreadPoolExecutor

logger = setup_logger()

# Model paths and NPU core assignment.
SEG_MODEL_PATH = "./merge_seg_rknn_model"
POSE_MODEL_PATH = "./merge_pose_rknn_model"
SEG_CORE = 0  # NPU core dedicated to the segmentation model
POSE_CORE = 1  # NPU core dedicated to the pose (keypoint) model

# Throughput tuning knobs.
SEG_WORKER_THREADS = 2   # inference threads for the segmentation worker
POSE_WORKER_THREADS = 2   # inference threads for the pose worker
BATCH_SIZE = 4           # micro-batch size per worker thread
QUEUE_SIZE = 30          # larger task queue to absorb concurrency bursts

class MultiThreadNPUWorker:
    """多线程NPU工作器 - 单个NPU核心多线程处理"""
    def __init__(self, core_id, model_path, task_type, num_threads=4):
        self.core_id = core_id
        self.task_type = task_type
        self.model_path = model_path
        self.num_threads = num_threads
        
        # 使用更大的队列支持高并发
        self.task_queue = queue.Queue(maxsize=QUEUE_SIZE)
        self.running = True
        
        # 每个线程一个模型实例，避免共享冲突
        self.models = {}
        self.model_ready = threading.Event()
        
        # 性能统计
        self.stats = {
            'total_tasks': 0,
            'successful_tasks': 0,
            'failed_tasks': 0,
            'queue_full_count': 0,
            'avg_inference_time': 0.0,
            'last_reset': time.time(),
            'active_threads': 0,
            'peak_queue_size': 0,
            'total_inference_time': 0.0
        }
        self.stats_lock = threading.Lock()
        
        # 创建线程池
        self.thread_pool = []
        self.executor = ThreadPoolExecutor(max_workers=num_threads, thread_name_prefix=f"NPU-{core_id}-{task_type}")
        
        # 启动工作线程
        self._start_workers()
        
        logger.info(f"多线程NPU核心 {core_id} 工作器已启动 ({task_type}, {num_threads}线程)")
    
    def _start_workers(self):
        """启动多个工作线程"""
        for i in range(self.num_threads):
            future = self.executor.submit(self._worker_loop, i)
            self.thread_pool.append(future)
    
    def _load_model(self, thread_id):
        """在指定线程中加载模型"""
        try:
            thread_name = f"Thread-{thread_id}"
            
            # 设置NPU核心绑定
            os.environ['RKNN_SERVER_CORE_MASK'] = str(1 << self.core_id)
            
            # 为每个线程创建独立的模型实例
            if self.task_type == "segment":
                model = YOLO(self.model_path, task='segment')
            else:  # pose
                model = YOLO(self.model_path, task='pose')
            
            self.models[thread_id] = model
            
            # 模型预热 - 用dummy数据进行一次推理
            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 开始模型预热...")
            dummy_img = np.zeros((640, 640, 3), dtype=np.uint8)
            _ = model.predict(dummy_img, save=False, verbose=False)
            
            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: {self.task_type}模型加载并预热完成")
            return True
            
        except Exception as e:
            logger.error(f"核心 {self.core_id}, 线程 {thread_id} 模型加载失败: {e}")
            return False
    
    def _worker_loop(self, thread_id):
        """优化的多线程工作循环"""
        # 首先加载模型
        if not self._load_model(thread_id):
            return
        
        # 通知模型加载完成
        if len(self.models) == 1:  # 第一个线程设置事件
            self.model_ready.set()
        
        model = self.models[thread_id]
        
        # 增加活跃线程计数
        with self.stats_lock:
            self.stats['active_threads'] += 1
        
        # 批处理缓冲区
        batch_buffer = []
        last_batch_time = time.time()
        batch_timeout = 0.002  # 2ms批处理超时
        max_batch_size = BATCH_SIZE
        
        logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 开始处理任务...")
        
        while self.running:
            try:
                # 非阻塞获取任务 - 更积极的任务获取
                batch_filled = False
                for _ in range(max_batch_size):
                    try:
                        task = self.task_queue.get(timeout=0.0001)  # 更短的超时
                        batch_buffer.append(task)
                        if len(batch_buffer) >= max_batch_size:
                            batch_filled = True
                            break
                    except queue.Empty:
                        break
                
                # 检查是否需要处理批次
                current_time = time.time()
                should_process = (
                    batch_filled or
                    (len(batch_buffer) > 0 and (current_time - last_batch_time) > batch_timeout)
                )
                
                if should_process and batch_buffer:
                    # 记录队列峰值大小
                    queue_size = self.task_queue.qsize()
                    with self.stats_lock:
                        self.stats['peak_queue_size'] = max(self.stats['peak_queue_size'], queue_size)
                    
                    # 处理批次
                    self._process_batch_optimized(batch_buffer, model, thread_id)
                    batch_buffer.clear()
                    last_batch_time = current_time
                
                # 如果没有任务，使用更短的休眠时间
                if not batch_buffer:
                    time.sleep(0.00005)  # 0.05ms，更积极地检查新任务
                    
            except Exception as e:
                logger.error(f"核心 {self.core_id}, 线程 {thread_id} 工作循环错误: {e}")
                # 清空错误的批次
                for task in batch_buffer:
                    self._send_error_result(task, str(e))
                batch_buffer.clear()
        
        # 减少活跃线程计数
        with self.stats_lock:
            self.stats['active_threads'] -= 1
        
        logger.info(f"NPU核心 {self.core_id}, 线程 {thread_id} 工作线程已退出")
    
    def _process_batch_optimized(self, tasks, model, thread_id):
        """优化的批处理"""
        if not tasks:
            return
        
        # 对于YOLO模型，目前还是逐个处理，但可以优化预处理
        # 未来可以考虑真正的批处理推理
        for task in tasks:
            try:
                self._process_single_task_fast(task, model, thread_id)
            except Exception as e:
                logger.error(f"核心 {self.core_id}, 线程 {thread_id} 处理任务错误: {e}")
                self._send_error_result(task, str(e))
    
    def _process_single_task_fast(self, task, model, thread_id):
        """快速单任务处理"""
        img = task['img']
        result_queue = task['result_queue']
        task_id = task['task_id']
        
        # 执行推理
        start_time = time.time()
        results = model.predict(img, save=False, verbose=False)
        inference_time = time.time() - start_time
        
        # 处理结果
        processed_result = self._process_result(results[0])
        
        # 更新统计信息
        self._update_stats_fast(inference_time, processed_result is not None)
        
        # 返回结果
        result_data = {
            'task_type': self.task_type,
            'task_id': task_id,
            'result': processed_result,
            'inference_time': inference_time,
            'success': processed_result is not None,
            'thread_id': thread_id
        }
        
        # 非阻塞发送结果
        try:
            result_queue.put_nowait(result_data)
        except queue.Full:
            logger.warning(f"核心 {self.core_id}, 线程 {thread_id} 结果队列已满，丢弃结果")
    
    def _send_error_result(self, task, error_msg):
        """发送错误结果"""
        try:
            task['result_queue'].put_nowait({
                'task_type': self.task_type,
                'task_id': task['task_id'],
                'result': None,
                'inference_time': 0,
                'success': False,
                'error': error_msg
            })
        except:
            pass
    
    def _process_result(self, result):
        """快速结果处理"""
        try:
            if self.task_type == "segment":
                if not hasattr(result, 'masks') or result.masks is None:
                    return None
                masks_xy = result.masks.xy
                if len(masks_xy) == 0:
                    return None
                return masks_xy[0]
            else:  # pose
                if not hasattr(result, 'keypoints') or result.keypoints is None:
                    return None
                return result.keypoints
        except Exception as e:
            logger.debug(f"结果处理错误 ({self.task_type}): {e}")
            return None
    
    def _update_stats_fast(self, inference_time, success):
        """快速统计更新"""
        with self.stats_lock:
            self.stats['total_tasks'] += 1
            self.stats['total_inference_time'] += inference_time
            
            if success:
                self.stats['successful_tasks'] += 1
                # 计算平均推理时间
                self.stats['avg_inference_time'] = (
                    self.stats['total_inference_time'] / self.stats['successful_tasks']
                )
            else:
                self.stats['failed_tasks'] += 1
    
    def add_task(self, img, result_queue, task_id):
        """优化的任务添加 - 支持高并发"""
        try:
            self.task_queue.put_nowait({
                'img': img,
                'result_queue': result_queue,
                'task_id': task_id
            })
            return True
        except queue.Full:
            with self.stats_lock:
                self.stats['queue_full_count'] += 1
            
            # 队列满时，尝试清理一些旧任务（可选）
            self._clear_old_tasks()
            
            # 再次尝试
            try:
                self.task_queue.put_nowait({
                    'img': img,
                    'result_queue': result_queue,
                    'task_id': task_id
                })
                return True
            except queue.Full:
                return False
    
    def _clear_old_tasks(self):
        """清理旧任务 - 保持队列流畅"""
        cleared = 0
        max_clear = self.task_queue.qsize() // 4  # 清理1/4的旧任务
        
        temp_tasks = []
        for _ in range(max_clear):
            try:
                task = self.task_queue.get_nowait()
                # 这里可以添加逻辑判断哪些任务需要保留
                # 目前简单丢弃旧任务
                cleared += 1
            except queue.Empty:
                break
        
        if cleared > 0:
            logger.debug(f"核心 {self.core_id}: 清理了 {cleared} 个旧任务")
    
    def get_stats(self):
        """获取性能统计"""
        with self.stats_lock:
            stats = self.stats.copy()
            # 计算实时指标
            if stats['total_tasks'] > 0:
                stats['success_rate'] = stats['successful_tasks'] / stats['total_tasks']
                uptime = time.time() - stats['last_reset']
                stats['tasks_per_second'] = stats['total_tasks'] / uptime if uptime > 0 else 0
                stats['queue_utilization'] = self.task_queue.qsize() / QUEUE_SIZE
            return stats
    
    def wait_for_ready(self, timeout=30):
        """等待模型加载完成"""
        return self.model_ready.wait(timeout)
    
    def stop(self):
        """停止工作器"""
        logger.info(f"正在停止NPU核心 {self.core_id} 工作器...")
        self.running = False
        
        # 关闭线程池
        self.executor.shutdown(wait=True, timeout=5.0)
        
        # 等待所有任务完成
        logger.info(f"NPU核心 {self.core_id} 工作器已停止")

class HighPerformanceSegPose:
    """High-level pipeline: runs segmentation and pose inference in parallel.

    Owns one MultiThreadNPUWorker per model/NPU core, fans each request out to
    both workers, and joins the two results with a bounded timeout.
    """

    def __init__(self):
        logger.info("初始化高性能多线程SegPose...")

        # One multi-threaded worker per model, each pinned to its own NPU core.
        self.seg_worker = MultiThreadNPUWorker(SEG_CORE, SEG_MODEL_PATH, "segment", SEG_WORKER_THREADS)
        self.pose_worker = MultiThreadNPUWorker(POSE_CORE, POSE_MODEL_PATH, "pose", POSE_WORKER_THREADS)

        # Block until both models finished loading (or log the timeout).
        logger.info("等待模型加载完成...")
        if not self.seg_worker.wait_for_ready(10):
            logger.error("分割模型加载超时")
        if not self.pose_worker.wait_for_ready(10):
            logger.error("姿态模型加载超时")

        logger.info("所有模型加载完成")

        # Monotonically increasing request id, guarded by self.lock.
        self.task_counter = 0
        self.lock = threading.Lock()

        # Global request-level statistics, guarded by global_stats_lock.
        self.global_stats = {
            'total_requests': 0,
            'successful_requests': 0,
            'failed_requests': 0,
            'timeout_requests': 0,
            'avg_total_time': 0.0,
            'start_time': time.time(),
            'peak_fps': 0.0,
            'current_fps': 0.0
        }
        self.global_stats_lock = threading.Lock()

        # Timestamps of the most recent requests, for the sliding-window FPS.
        self.fps_window = deque(maxlen=100)

        logger.info("高性能SegPose初始化完成")

    def get_result(self, img):
        """Run segmentation and pose inference on *img* in parallel.

        Logs the wall time of each stage. All timing uses time.perf_counter().

        Returns:
            (seg_result, pose_result); either may be None on failure/timeout,
            and (None, None) is returned when submission to a worker fails.
        """
        request_start = time.perf_counter()

        # 1. Allocate a unique task id for this request.
        start_task_id = time.perf_counter()
        with self.lock:
            self.task_counter += 1
            task_id = self.task_counter
        task_id_time = time.perf_counter() - start_task_id
        logger.info(f"[SEG_POSE] 生成任务 ID 耗时: {task_id_time:.4f} 秒")

        # 2. Per-request result queue both workers will push into.
        start_queue_create = time.perf_counter()
        result_queue = queue.Queue(maxsize=10)
        queue_create_time = time.perf_counter() - start_queue_create
        logger.info(f"[SEG_POSE] 创建结果队列耗时: {queue_create_time:.4f} 秒")

        # 3. Fan the image out to both workers.
        start_task_submit = time.perf_counter()
        seg_submitted = self.seg_worker.add_task(img, result_queue, task_id)
        pose_submitted = self.pose_worker.add_task(img, result_queue, task_id)
        task_submit_time = time.perf_counter() - start_task_submit
        logger.info(f"[SEG_POSE] 提交任务耗时: {task_submit_time:.4f} 秒 (seg: {seg_submitted}, pose: {pose_submitted})")

        if not seg_submitted or not pose_submitted:
            logger.warning(f"[SEG_POSE] 任务提交失败 - seg: {seg_submitted}, pose: {pose_submitted}")
            self._update_global_stats(time.perf_counter() - request_start, False, 'submission_failed')
            return None, None

        # 4. Join the two results (bounded by the collection timeout).
        start_collect = time.perf_counter()
        seg_result, pose_result = self._collect_results_ultra_fast(result_queue, task_id, request_start)
        collect_time = time.perf_counter() - start_collect
        logger.info(f"[SEG_POSE] 收集结果耗时: {collect_time:.4f} 秒")

        # 5. Update global statistics and the FPS window.
        start_stats = time.perf_counter()
        total_time = time.perf_counter() - request_start
        success = seg_result is not None and pose_result is not None
        self._update_global_stats(total_time, success, 'timeout' if not success else 'success')
        self._update_fps_stats()
        stats_time = time.perf_counter() - start_stats
        logger.info(f"[SEG_POSE] 更新统计耗时: {stats_time:.4f} 秒")

        logger.info(f"[SEG_POSE] 总耗时: {total_time:.4f} 秒")

        return seg_result, pose_result

    def _collect_results_ultra_fast(self, result_queue, task_id, start_time):
        """Collect both workers' results for one request.

        Args:
            result_queue: the per-request queue both workers push into.
            task_id: id used to discard stale results from earlier requests.
            start_time: time.perf_counter() timestamp when the request began.

        Returns:
            (seg_result, pose_result); either may be None.
        """
        seg_result = None
        pose_result = None
        results_received = 0
        timeout = 3.0  # overall collection budget

        # BUG FIX: start_time is a time.perf_counter() value, but the original
        # compared the deadline against time.time(). The two clocks have
        # different epochs, so the deadline was always "in the past" and the
        # loop never ran -- every request appeared to time out.
        deadline = start_time + timeout

        while results_received < 2 and time.perf_counter() < deadline:
            remaining_time = deadline - time.perf_counter()
            if remaining_time <= 0:
                break

            try:
                # Very short waits so we react quickly to the second result.
                wait_time = min(0.001, remaining_time)
                result = result_queue.get(timeout=wait_time)

                if result['task_id'] != task_id:
                    continue  # stale result from an earlier, abandoned request

                if result['task_type'] == 'segment' and seg_result is None:
                    seg_result = result['result'] if result['success'] else None
                elif result['task_type'] == 'pose' and pose_result is None:
                    pose_result = result['result'] if result['success'] else None

                results_received += 1

                # Both payloads present: stop early.
                if seg_result is not None and pose_result is not None:
                    break

            except queue.Empty:
                continue
            except Exception as e:
                logger.error(f"[SEG_POSE] 结果收集错误: {e}")
                break

        return seg_result, pose_result

    def _update_global_stats(self, total_time, success, status):
        """Fold one finished request into the global counters (thread-safe)."""
        with self.global_stats_lock:
            self.global_stats['total_requests'] += 1

            if success:
                self.global_stats['successful_requests'] += 1
                # Incremental running mean over successful requests.
                n = self.global_stats['successful_requests']
                self.global_stats['avg_total_time'] = (
                    self.global_stats['avg_total_time'] * (n-1) + total_time
                ) / n
            else:
                if status == 'timeout':
                    self.global_stats['timeout_requests'] += 1
                else:
                    self.global_stats['failed_requests'] += 1

    def _update_fps_stats(self):
        """Recompute current/peak FPS over the sliding request window."""
        current_time = time.time()
        self.fps_window.append(current_time)

        if len(self.fps_window) >= 2:
            time_span = self.fps_window[-1] - self.fps_window[0]
            if time_span > 0:
                # N timestamps span N-1 inter-request intervals.
                current_fps = (len(self.fps_window) - 1) / time_span
                with self.global_stats_lock:
                    self.global_stats['current_fps'] = current_fps
                    self.global_stats['peak_fps'] = max(self.global_stats['peak_fps'], current_fps)

    def get_comprehensive_stats(self):
        """Return a combined report: global stats plus both workers' stats."""
        global_stats = self.get_global_stats()
        seg_stats = self.seg_worker.get_stats()
        pose_stats = self.pose_worker.get_stats()

        return {
            'global': global_stats,
            'seg_worker': seg_stats,
            'pose_worker': pose_stats,
            'timestamp': time.time(),
            'total_active_threads': seg_stats['active_threads'] + pose_stats['active_threads'],
            'total_queue_size': seg_stats.get('queue_utilization', 0) + pose_stats.get('queue_utilization', 0)
        }

    def get_global_stats(self):
        """Return a snapshot of the global counters with derived metrics."""
        with self.global_stats_lock:
            stats = self.global_stats.copy()
            stats['uptime'] = time.time() - stats['start_time']
            # Derived metrics exist only once at least one request was made.
            if stats['total_requests'] > 0:
                stats['success_rate'] = stats['successful_requests'] / stats['total_requests']
                stats['avg_fps'] = stats['successful_requests'] / stats['uptime']
            return stats

    def shutdown(self):
        """Log a final report, then stop both workers."""
        logger.info("正在关闭高性能SegPose...")

        final_stats = self.get_comprehensive_stats()
        logger.info(f"最终统计:")
        logger.info(f"  总请求: {final_stats['global']['total_requests']}")
        logger.info(f"  成功率: {final_stats['global'].get('success_rate', 0)*100:.1f}%")
        logger.info(f"  平均FPS: {final_stats['global'].get('avg_fps', 0):.2f}")
        logger.info(f"  峰值FPS: {final_stats['global'].get('peak_fps', 0):.2f}")
        logger.info(f"  活跃线程: {final_stats['total_active_threads']}")

        self.seg_worker.stop()
        self.pose_worker.stop()

        logger.info("高性能SegPose已关闭")

# Backward-compatibility alias for existing callers.
class SegPose(HighPerformanceSegPose):
    """Backward-compatible alias of HighPerformanceSegPose (historically OptimizedSegPose)."""
    pass

if __name__ == "__main__":
    logger.info("开始高性能SegPose测试...")

    pipeline = HighPerformanceSegPose()

    # Benchmark configuration: every image in the sample directory.
    test_dir = "./data/Color1"
    valid_exts = ('.png', '.jpg', '.jpeg')
    image_files = [name for name in os.listdir(test_dir)
                   if name.lower().endswith(valid_exts)]

    if not image_files:
        logger.error(f"在目录 {test_dir} 中未找到图片文件")
        pipeline.shutdown()
        exit(1)

    logger.info(f"找到 {len(image_files)} 张测试图片")

    # Preload every image into memory so disk I/O does not skew the timing.
    test_images = []
    for name in image_files:
        frame = cv2.imread(os.path.join(test_dir, name))
        if frame is not None:
            test_images.append((name, frame))

    logger.info(f"预加载了 {len(test_images)} 张图片")

    # Stress test: several full passes over the preloaded set.
    test_rounds = 3
    images_per_round = len(test_images)

    for round_num in range(test_rounds):
        logger.info(f"\n开始第 {round_num + 1} 轮测试...")
        round_start = time.time()

        for idx, (name, frame) in enumerate(test_images, start=1):
            try:
                tick = time.time()
                seg_result, pose_result = pipeline.get_result(frame)
                process_time = time.time() - tick

                if idx % 20 == 0:  # progress report every 20 images
                    stats = pipeline.get_global_stats()
                    logger.info(f"  处理进度: {idx}/{images_per_round}, "
                               f"当前FPS: {stats.get('current_fps', 0):.2f}, "
                               f"处理时间: {process_time*1000:.1f}ms")

            except Exception as e:
                logger.error(f"处理图片 {name} 时出错: {str(e)}")

        round_time = time.time() - round_start
        round_fps = images_per_round / round_time
        logger.info(f"第 {round_num + 1} 轮完成: {round_fps:.2f} FPS")

    # Final report before shutting the pipeline down.
    final_stats = pipeline.get_comprehensive_stats()
    logger.info(f"\n最终性能报告:")
    logger.info(f"总处理: {final_stats['global']['total_requests']} 张图片")
    logger.info(f"成功率: {final_stats['global'].get('success_rate', 0)*100:.1f}%")
    logger.info(f"平均FPS: {final_stats['global'].get('avg_fps', 0):.2f}")
    logger.info(f"峰值FPS: {final_stats['global'].get('peak_fps', 0):.2f}")
    logger.info(f"分割模型线程利用: {final_stats['seg_worker']['active_threads']}/{SEG_WORKER_THREADS}")
    logger.info(f"姿态模型线程利用: {final_stats['pose_worker']['active_threads']}/{POSE_WORKER_THREADS}")

    pipeline.shutdown()