#!/usr/bin/env python3

import os
import threading
import queue
import time
import cv2
import numpy as np
from ultralytics import YOLO
from datetime import datetime
from logger import setup_logger
from collections import deque
import weakref
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from typing import Optional
import heapq

logger = setup_logger()

@dataclass
class ImageData:
    """Container for one captured frame pair plus its timestamps."""
    timestamp: str  # numeric timestamp as a string; parsed with float() for ordering (see SegPose.put)
    readable_timestamp: str  # human-readable rendering of the same instant
    color_img: np.ndarray  # color frame fed to both models for inference
    depth_img: np.ndarray  # aligned depth frame, carried along untouched
    color_path: Optional[str] = None  # source path of the color image, if loaded from disk

# Model paths and core NPU configuration
SEG_MODEL_PATH = "./merge_seg_rknn_model"
POSE_MODEL_PATH = "./merge_pose_rknn_model"
SEG_CORE = 0  # NPU core dedicated to the segmentation model
POSE_CORE = 1  # NPU core dedicated to the keypoint (pose) model

# Tuning knobs
SEG_WORKER_THREADS = 4   # worker-thread count for the segmentation model
POSE_WORKER_THREADS = 4   # worker-thread count for the pose model
QUEUE_SIZE = 30         # larger queue to allow more in-flight tasks

class NPUWorker:
    """Multi-threaded inference worker pinned to one NPU core.

    Each of ``num_threads`` worker threads loads its own YOLO model instance
    and pulls tasks from a single bounded queue. Results are delivered
    asynchronously through the per-task callback. When the queue overflows,
    all queued (not yet started) tasks are dropped in favour of the newest
    one, and their callbacks are invoked with a ``None`` result.
    """
    def __init__(self, core_id, model_path, task_type, num_threads=2):
        """
        Args:
            core_id: NPU core index this worker is bound to.
            model_path: path to the RKNN model directory.
            task_type: ``"segment"`` or ``"pose"`` — selects the YOLO task.
            num_threads: number of worker threads (one model instance each).
        """
        self.core_id = core_id
        self.task_type = task_type
        self.model_path = model_path
        self.num_threads = num_threads

        # Bounded task queue shared by all worker threads.
        self.task_queue = queue.Queue(maxsize=QUEUE_SIZE)
        self.running = True

        # One model instance per worker thread (thread_id -> model).
        self.models = {}
        # Set once the FIRST thread finishes loading its model.
        self.model_ready = threading.Event()

        # Thread pool hosting the long-running worker loops.
        self.executor = ThreadPoolExecutor(max_workers=num_threads, thread_name_prefix=f"NPU-{core_id}-{task_type}")

        # Kick off the worker threads.
        self._start_workers()

        logger.info(f"NPU核心 {core_id} 工作器已启动 ({task_type}, {num_threads}线程)")

    def _start_workers(self):
        """Submit one long-running worker loop per configured thread."""
        for i in range(self.num_threads):
            self.executor.submit(self._worker_loop, i)

    def _load_model(self, thread_id):
        """Load (and warm up) a model instance inside the calling worker thread.

        Returns:
            True on success, False on failure (the worker loop then exits).
        """
        try:
            # NOTE(review): os.environ is process-global, so setting the core
            # mask here races between workers bound to different cores —
            # whichever thread writes last wins for subsequent loads. Confirm
            # against the RKNN runtime whether per-thread binding is honoured.
            os.environ['RKNN_SERVER_CORE_MASK'] = str(1 << self.core_id)

            # Independent model instance for this thread.
            if self.task_type == "segment":
                model = YOLO(self.model_path, task='segment')
            else:  # pose
                model = YOLO(self.model_path, task='pose')

            self.models[thread_id] = model

            # Warm-up inference so the first real request is not slowed down.
            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 开始模型预热...")
            dummy_img = np.zeros((640, 640, 3), dtype=np.uint8)
            _ = model.predict(dummy_img, save=False, verbose=False)

            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: {self.task_type}模型加载完成")

            self.model_ready.set()

            return True

        except Exception as e:
            logger.error(f"核心 {self.core_id}, 线程 {thread_id} 模型加载失败: {e}")
            return False

    def _worker_loop(self, thread_id):
        """Worker thread body: load a model, then drain the task queue until stopped."""
        if not self._load_model(thread_id):
            return

        model = self.models[thread_id]

        logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 开始处理任务...")

        while self.running:
            try:
                # Short timeout so the loop re-checks self.running promptly.
                task = self.task_queue.get(timeout=0.1)
                logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 获取到任务 {task['task_id']}")

                self._process_task(task, model, thread_id)

                # Mark the queue slot as done regardless of task outcome.
                self.task_queue.task_done()

            except queue.Empty:
                continue
            except Exception as e:
                logger.error(f"核心 {self.core_id}, 线程 {thread_id} 处理任务错误: {e}")

        logger.info(f"NPU核心 {self.core_id}, 线程 {thread_id} 工作线程已退出")

    def _process_task(self, task, model, thread_id):
        """Run inference for one task and deliver the result via its callback.

        On any failure the callback is still invoked, with a ``None`` result.
        """
        try:
            # Pull the color image out of the ImageData for inference.
            image_data = task['image_data']
            img = image_data.color_img
            task_id = task['task_id']
            result_callback = task['callback']

            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 开始处理任务 {task_id}")

            # Run inference and time it.
            start_time = time.time()
            results = model.predict(img, save=False, verbose=False)
            inference_time = time.time() - start_time

            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 任务 {task_id} 推理完成，耗时 {inference_time:.3f}s")

            # Post-process the first (and only) prediction result.
            processed_result = self._process_result(results[0])

            logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 任务 {task_id} 结果处理完成")

            # Hand the result back to the submitter.
            if result_callback:
                result_callback(self.task_type, task_id, processed_result, inference_time)
                logger.info(f"核心 {self.core_id}, 线程 {thread_id}: 任务 {task_id} 回调完成")

        except Exception as e:
            logger.error(f"核心 {self.core_id}, 线程 {thread_id} 任务处理失败: {e}")
            import traceback
            logger.error(f"详细错误: {traceback.format_exc()}")
            if task.get('callback'):
                task['callback'](self.task_type, task['task_id'], None, 0)

    def _process_result(self, result):
        """Extract the payload from a raw prediction result.

        Returns the first instance's mask polygon for ``"segment"``, the
        keypoints object for ``"pose"``, or ``None`` when nothing was
        detected or extraction failed.

        NOTE(review): a frame with no detections and a failed task both
        yield ``None`` here — the consumer (SegPose._handle_result) treats
        both as cancellation, so empty frames never complete. Confirm this
        is intended.
        """
        try:
            if self.task_type == "segment":
                if not hasattr(result, 'masks') or result.masks is None:
                    return None
                masks_xy = result.masks.xy
                if len(masks_xy) == 0:
                    return None
                # Only the first detected instance is used.
                return masks_xy[0]
            else:  # pose
                if not hasattr(result, 'keypoints') or result.keypoints is None:
                    return None
                return result.keypoints
        except Exception as e:
            logger.debug(f"结果处理错误 ({self.task_type}): {e}")
            return None

    def add_task(self, image_data, task_id, callback):
        """Enqueue a task; if the queue is full, drop the backlog and keep this one.

        Always returns True (either the task was queued, or it replaced the
        stale backlog).
        """
        task = {
            'image_data': image_data,
            'task_id': task_id,
            'callback': callback
        }

        try:
            self.task_queue.put_nowait(task)
            logger.debug(f"核心 {self.core_id}: 成功添加任务 {task_id}，队列大小: {self.task_queue.qsize()}")
            return True
        except queue.Full:
            # Queue full: discard all queued tasks, keep only the newest.
            dropped_count = self._clear_queue_and_add_new(task)
            logger.warning(f"核心 {self.core_id} 任务队列已满，清空了 {dropped_count} 个旧任务，添加新任务 {task_id}")
            return True

    def _clear_queue_and_add_new(self, new_task):
        """Drain every queued task (invoking callbacks with None), then enqueue new_task.

        Returns the number of tasks dropped.
        """
        dropped_count = 0
        dropped_task_ids = []

        # Drain everything currently queued.
        while True:
            try:
                old_task = self.task_queue.get_nowait()
                dropped_count += 1
                dropped_task_ids.append(old_task['task_id'])

                # Notify the submitter that the task was cancelled (result=None).
                if old_task.get('callback'):
                    try:
                        old_task['callback'](self.task_type, old_task['task_id'], None, 0)
                    except Exception as e:
                        logger.error(f"调用被丢弃任务 {old_task['task_id']} 的回调时出错: {e}")

            except queue.Empty:
                break

        # Enqueue the new task into the (now mostly empty) queue.
        try:
            self.task_queue.put_nowait(new_task)
            if dropped_count > 0:
                logger.info(f"核心 {self.core_id}: 丢弃的任务IDs: {dropped_task_ids}")
        except queue.Full:
            # Should be impossible right after draining, unless workers raced us.
            logger.error(f"核心 {self.core_id}: 清空队列后仍然无法添加新任务")
            dropped_count += 1

        return dropped_count

    def wait_for_ready(self, timeout=30):
        """Block until at least one thread has loaded its model (or timeout)."""
        return self.model_ready.wait(timeout)

    def stop(self):
        """Stop the worker: signal the loops to exit, then wait for the pool."""
        logger.info(f"正在停止NPU核心 {self.core_id} 工作器...")
        self.running = False

        # ThreadPoolExecutor.shutdown() has no ``timeout`` parameter in any
        # Python version (its signature is shutdown(wait, *, cancel_futures)).
        # The worker loops poll self.running every 0.1s, so a plain blocking
        # shutdown returns promptly.
        self.executor.shutdown(wait=True)

        logger.info(f"NPU核心 {self.core_id} 工作器已停止")

class SegPose:
    """按时间戳排序的高性能SegPose - 支持过时任务丢弃"""
    def __init__(self):
        logger.info("初始化时间戳排序SegPose...")
        
        # 检查模型文件是否存在
        if not os.path.exists(SEG_MODEL_PATH):
            logger.error(f"分割模型文件不存在: {SEG_MODEL_PATH}")
            raise FileNotFoundError(f"分割模型文件不存在: {SEG_MODEL_PATH}")
        
        if not os.path.exists(POSE_MODEL_PATH):
            logger.error(f"姿态模型文件不存在: {POSE_MODEL_PATH}")
            raise FileNotFoundError(f"姿态模型文件不存在: {POSE_MODEL_PATH}")
        
        logger.info(f"模型文件检查通过 - SEG: {SEG_MODEL_PATH}, POSE: {POSE_MODEL_PATH}")
        
        # 创建NPU工作器
        try:
            logger.info("创建分割模型工作器...")
            self.seg_worker = NPUWorker(SEG_CORE, SEG_MODEL_PATH, "segment", SEG_WORKER_THREADS)
        except Exception as e:
            logger.error(f"创建分割模型工作器失败: {e}")
            raise RuntimeError(f"创建分割模型工作器失败: {e}")
        
        try:
            logger.info("创建姿态模型工作器...")
            self.pose_worker = NPUWorker(POSE_CORE, POSE_MODEL_PATH, "pose", POSE_WORKER_THREADS)
        except Exception as e:
            logger.error(f"创建姿态模型工作器失败: {e}")
            # 如果姿态模型创建失败，需要停止已创建的分割模型工作器
            self.seg_worker.stop()
            raise RuntimeError(f"创建姿态模型工作器失败: {e}")
        
        # 等待模型加载完成
        logger.info("等待分割模型加载完成...")
        seg_ready = self.seg_worker.wait_for_ready(10)
        logger.info(f"分割模型等待结果: {seg_ready}")
        
        if not seg_ready:
            logger.error("分割模型加载超时，但可能已经加载完成，继续尝试...")
        
        logger.info("分割模型加载完成")
            
        logger.info("等待姿态模型加载完成...")
        pose_ready = self.pose_worker.wait_for_ready(10)
        logger.info(f"姿态模型等待结果: {pose_ready}")
        
        if not pose_ready:
            logger.error("姿态模型加载超时，但可能已经加载完成，继续尝试...")
        
        logger.info("姿态模型加载完成")
        logger.info("所有模型加载完成")
        
        # 任务管理
        self.task_counter = 0
        self.lock = threading.Lock()
        
        # 结果存储 - 使用时间戳排序的优先队列
        self.temp_results = {}  # task_id -> {'seg': result, 'pose': result, 'image_data': ImageData, 'timestamp': float}
        self.temp_results_lock = threading.Lock()
        
        # 使用堆来维护按时间戳排序的完成结果
        self.completed_heap = []  # 存储 (timestamp, task_id, result_data)
        self.completed_heap_lock = threading.Lock()
        
        # 记录最新已输出的时间戳，用于判断过时任务
        self.latest_output_timestamp = 0.0
        self.output_timestamp_lock = threading.Lock()
        
        # 性能统计
        self.stats = {
            'total_put': 0,
            'total_complete': 0,
            'total_get': 0,
            'get_hit': 0,
            'get_miss': 0,
            'outdated_dropped': 0,  # 新增：过时任务丢弃数量
            'queue_cleared': 0,     # 新增：队列清空次数
            'tasks_dropped_in_queue': 0,  # 新增：在队列中被丢弃的任务数量
        }
        self.stats_lock = threading.Lock()
        
        logger.info("时间戳排序SegPose初始化完成")
    
    def put(self, image_data: ImageData):
        """非阻塞地提交ImageData处理任务"""
        with self.lock:
            self.task_counter += 1
            task_id = self.task_counter
        
        # 从时间戳字符串转换为浮点数用于排序
        try:
            timestamp_float = float(image_data.timestamp)
        except ValueError:
            # 如果时间戳不是数字，使用当前时间
            timestamp_float = time.time() * 1000
            logger.warning(f"任务 {task_id} 时间戳格式错误，使用当前时间: {timestamp_float}")
        
        # 初始化临时结果记录，保存原始ImageData和时间戳
        with self.temp_results_lock:
            self.temp_results[task_id] = {
                'seg': None, 
                'pose': None, 
                'image_data': image_data,
                'timestamp': timestamp_float
            }
        
        # 创建结果回调函数
        def result_callback(task_type, tid, result, inference_time):
            self._handle_result(tid, task_type, result)
        
        # 同时提交到两个NPU
        seg_ok = self.seg_worker.add_task(image_data, task_id, result_callback)
        pose_ok = self.pose_worker.add_task(image_data, task_id, result_callback)
        
        if not seg_ok or not pose_ok:
            # 如果提交失败，清理临时结果记录
            with self.temp_results_lock:
                self.temp_results.pop(task_id, None)
            logger.warning(f"任务 {task_id} 提交失败")
            return None
        
        # 更新统计
        with self.stats_lock:
            self.stats['total_put'] += 1
        
        logger.debug(f"任务 {task_id} 已提交，时间戳: {image_data.timestamp}")
        return task_id
    
    def _handle_result(self, task_id, task_type, result):
        """处理单个结果回调"""
        logger.debug(f"收到结果回调: 任务{task_id}, 类型{task_type}, 结果是否为空: {result is None}")
        
        # 如果结果为None，可能是任务被取消了
        if result is None:
            logger.info(f"任务 {task_id} 的 {task_type} 结果为空，可能已被取消")
            with self.temp_results_lock:
                self.temp_results.pop(task_id, None)
            return
        
        with self.temp_results_lock:
            if task_id not in self.temp_results:
                logger.warning(f"收到未知任务 {task_id} 的结果")
                return
            
            # 保存结果
            if task_type == "segment":
                self.temp_results[task_id]['seg'] = result
            elif task_type == "pose":
                self.temp_results[task_id]['pose'] = result
            
            # 检查是否两个结果都完成了
            task_result = self.temp_results[task_id]
            seg_ready = task_result['seg'] is not None
            pose_ready = task_result['pose'] is not None
            
            logger.debug(f"任务 {task_id} 状态检查: seg_ready={seg_ready}, pose_ready={pose_ready}")
            
            if seg_ready and pose_ready:
                logger.info(f"任务 {task_id} 完成，seg和pose结果都已收到")
                
                # 检查是否已经过时
                timestamp = task_result['timestamp']
                with self.output_timestamp_lock:
                    if timestamp < self.latest_output_timestamp:
                        # 该任务已过时，丢弃
                        logger.warning(f"任务 {task_id} (时间戳: {timestamp}) 已过时，最新输出时间戳: {self.latest_output_timestamp}，丢弃该任务")
                        self.temp_results.pop(task_id, None)
                        with self.stats_lock:
                            self.stats['outdated_dropped'] += 1
                        return
                
                # 两个结果都完成且未过时，放入优先队列
                try:
                    result_data = {
                        'task_id': task_id,
                        'seg_result': task_result['seg'],
                        'pose_result': task_result['pose'],
                        'image_data': task_result['image_data'],
                        'timestamp': timestamp
                    }
                    
                    with self.completed_heap_lock:
                        # 使用堆来维护按时间戳排序的结果
                        heapq.heappush(self.completed_heap, (timestamp, task_id, result_data))
                        logger.debug(f"任务 {task_id} 结果已放入时间戳排序堆，堆大小: {len(self.completed_heap)}")
                    
                    # 清理临时结果
                    self.temp_results.pop(task_id, None)
                    
                    # 更新统计
                    with self.stats_lock:
                        self.stats['total_complete'] += 1
                        
                except Exception as e:
                    logger.error(f"任务 {task_id} 放入堆时出错: {e}")
                    self.temp_results.pop(task_id, None)
            else:
                logger.debug(f"任务 {task_id} 部分完成: seg={seg_ready}, pose={pose_ready}")
    
    def get_result(self):
        """按时间戳顺序获取最早的完成结果，返回 (seg_result, pose_result, image_data)，没有结果返回 (None, None, None)"""
        with self.stats_lock:
            self.stats['total_get'] += 1
        
        with self.completed_heap_lock:
            if not self.completed_heap:
                # 堆为空，返回None
                with self.stats_lock:
                    self.stats['get_miss'] += 1
                return None, None, None
            
            # 从堆顶获取时间戳最早的结果
            timestamp, task_id, result_data = heapq.heappop(self.completed_heap)
            
            logger.info(f"成功获取到完成的结果: 任务{task_id}, 时间戳: {timestamp}")
            
            # 更新最新输出时间戳
            with self.output_timestamp_lock:
                self.latest_output_timestamp = max(self.latest_output_timestamp, timestamp)
            
            with self.stats_lock:
                self.stats['get_hit'] += 1
            
            # 清理堆中所有过时的任务
            self._cleanup_outdated_results()
            
            return result_data['seg_result'], result_data['pose_result'], result_data['image_data']
    
    def _cleanup_outdated_results(self):
        """清理堆中所有过时的结果"""
        # 注意：这个方法应该在已经持有completed_heap_lock的情况下调用
        cleaned_heap = []
        dropped_count = 0
        
        with self.output_timestamp_lock:
            latest_ts = self.latest_output_timestamp
        
        while self.completed_heap:
            timestamp, task_id, result_data = self.completed_heap[0]  # 查看堆顶
            if timestamp < latest_ts:
                # 过时的结果，丢弃
                heapq.heappop(self.completed_heap)
                dropped_count += 1
                logger.debug(f"清理过时任务 {task_id} (时间戳: {timestamp})")
            else:
                # 不过时，保留
                break
        
        if dropped_count > 0:
            logger.info(f"清理了 {dropped_count} 个过时的堆结果")
            with self.stats_lock:
                self.stats['outdated_dropped'] += dropped_count
    
    def get_pending_count(self):
        """获取待处理和已完成但未取走的任务数量"""
        with self.temp_results_lock:
            processing = len(self.temp_results)
        
        with self.completed_heap_lock:
            completed_pending = len(self.completed_heap)
        
        return {
            'processing': processing,
            'completed_pending': completed_pending,
            'total_pending': processing + completed_pending
        }
    
    def get_stats(self):
        """获取性能统计"""
        with self.stats_lock:
            stats = self.stats.copy()
        
        pending = self.get_pending_count()
        stats.update(pending)
        
        # 计算命中率
        if stats['total_get'] > 0:
            stats['hit_rate'] = stats['get_hit'] / stats['total_get']
        else:
            stats['hit_rate'] = 0.0
        
        # 计算过时任务比例
        if stats['total_complete'] + stats['outdated_dropped'] > 0:
            stats['outdated_rate'] = stats['outdated_dropped'] / (stats['total_complete'] + stats['outdated_dropped'])
        else:
            stats['outdated_rate'] = 0.0
        
        with self.output_timestamp_lock:
            stats['latest_output_timestamp'] = self.latest_output_timestamp
        
        return stats
    
    def clear_old_results(self, max_age_seconds=60):
        """清理过期的临时结果和堆结果"""
        current_time = time.time() * 1000  # 转换为毫秒
        cutoff_time = current_time - max_age_seconds * 1000
        cleared = 0
        
        # 清理临时结果中的过期任务
        with self.temp_results_lock:
            expired_tasks = []
            for tid, results in self.temp_results.items():
                if results['timestamp'] < cutoff_time:
                    expired_tasks.append(tid)
            
            for tid in expired_tasks:
                self.temp_results.pop(tid, None)
                cleared += 1
        
        # 清理堆中的过期结果
        with self.completed_heap_lock:
            cleaned_heap = []
            while self.completed_heap:
                timestamp, task_id, result_data = heapq.heappop(self.completed_heap)
                if timestamp >= cutoff_time:
                    cleaned_heap.append((timestamp, task_id, result_data))
                else:
                    cleared += 1
            
            # 重建堆
            self.completed_heap = cleaned_heap
            heapq.heapify(self.completed_heap)
        
        if cleared > 0:
            logger.info(f"清理了 {cleared} 个过期任务")
            with self.stats_lock:
                self.stats['outdated_dropped'] += cleared
        
        return cleared
    
    def shutdown(self):
        """关闭SegPose"""
        logger.info("正在关闭SegPose...")
        
        # 输出最终统计
        stats = self.get_stats()
        logger.info(f"最终统计: {stats}")
        
        # 停止workers
        self.seg_worker.stop()
        self.pose_worker.stop()
        
        logger.info("SegPose已关闭")

if __name__ == "__main__":
    logger.info("开始SegPose测试...")

    seg_pose = SegPose()

    # Test configuration: read sample frames from a local directory.
    test_dir = "./data/Color1"
    image_files = [f for f in os.listdir(test_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]

    if not image_files:
        logger.error(f"在目录 {test_dir} 中未找到图片文件")
        seg_pose.shutdown()
        # raise SystemExit instead of the site-module exit() helper, which is
        # not guaranteed to exist in all run modes.
        raise SystemExit(1)

    logger.info(f"找到 {len(image_files)} 张测试图片")

    # Pre-load images and build ImageData objects with synthetic,
    # monotonically increasing timestamps.
    test_image_data = []
    base_timestamp = int(time.time() * 1000)

    # Only a 10-image slice is tested; the slice is empty (not an error) if
    # fewer than 300 images exist.
    for i, img_file in enumerate(image_files[300:310]):
        img_path = os.path.join(test_dir, img_file)
        img = cv2.imread(img_path)
        if img is not None:
            image_data = ImageData(
                timestamp=str(base_timestamp + i * 100),  # 100 ms between frames
                readable_timestamp=datetime.fromtimestamp((base_timestamp + i * 100)/1000).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
                color_img=img,
                depth_img=np.zeros_like(img[:,:,0]),  # stand-in depth frame
                color_path=img_path
            )
            test_image_data.append((img_file, image_data))

    logger.info(f"创建了 {len(test_image_data)} 个带时间戳的ImageData对象")

    # Submit tasks quickly to exercise out-of-order completion.
    logger.info("开始提交任务...")
    submitted_tasks = []

    start_time = time.time()
    for i, (img_file, image_data) in enumerate(test_image_data):
        task_id = seg_pose.put(image_data)
        if task_id:
            submitted_tasks.append((task_id, image_data.timestamp))
            logger.info(f"提交任务 {task_id}, 时间戳: {image_data.timestamp}")

        # Small delay so the timestamp ordering is observable.
        time.sleep(0.1)

    submit_time = time.time() - start_time
    logger.info(f"提交完成，用时 {submit_time:.2f}s")

    # Collect results in timestamp order.
    # NOTE(review): if any submitted task is dropped (queue overflow or an
    # empty detection), this loop never reaches its target count and spins
    # forever — consider a deadline for production use.
    logger.info("开始按时间戳顺序获取结果...")
    collected_results = []
    last_stats_time = time.time()

    while len(collected_results) < len(submitted_tasks):
        seg_result, pose_result, image_data = seg_pose.get_result()

        if seg_result is not None and pose_result is not None and image_data is not None:
            collected_results.append((seg_result, pose_result, image_data))

            logger.info(f"获取结果 {len(collected_results)}: 时间戳 {image_data.timestamp}, "
                       f"可读时间 {image_data.readable_timestamp}")

            # Verify each result's timestamp is >= its predecessor's.
            if len(collected_results) > 1:
                prev_ts = float(collected_results[-2][2].timestamp)
                curr_ts = float(image_data.timestamp)
                if curr_ts < prev_ts:
                    logger.error(f"时间戳顺序错误！上一个: {prev_ts}, 当前: {curr_ts}")
                else:
                    logger.info(f"✅ 时间戳顺序正确")
        else:
            # Nothing ready yet; back off briefly.
            time.sleep(0.05)

        # Periodic statistics dump (every ~2 seconds).
        if time.time() - last_stats_time > 2.0:
            stats = seg_pose.get_stats()
            logger.info(f"统计: {stats}")
            last_stats_time = time.time()

    total_time = time.time() - start_time
    logger.info(f"测试完成！")
    logger.info(f"总耗时: {total_time:.2f}s")
    logger.info(f"平均处理速度: {len(collected_results)/total_time:.1f} 张/秒")

    # Final ordering check over the whole collected sequence.
    logger.info("验证结果时间戳排序...")
    timestamps = [float(result[2].timestamp) for result in collected_results]
    is_sorted = all(timestamps[i] <= timestamps[i+1] for i in range(len(timestamps)-1))
    logger.info(f"结果按时间戳排序: {'✅ 正确' if is_sorted else '❌ 错误'}")

    # Dump the timestamp sequence for manual inspection.
    logger.info("时间戳序列:")
    for i, ts in enumerate(timestamps):
        logger.info(f"  结果 {i+1}: {ts}")

    # Final statistics.
    final_stats = seg_pose.get_stats()
    logger.info(f"最终统计: {final_stats}")
    logger.info(f"过时任务丢弃率: {final_stats.get('outdated_rate', 0)*100:.1f}%")

    seg_pose.shutdown()