import os
import time

import cv2
import numpy as np
import pandas as pd
import torch
from ultralytics import YOLO

from kalman_filter import KalmanFilterTracker
from validate import show_all_val, validate_model

# FIXME(review): `cv2.datasets` is not a module of opencv-python and `none`
# is not a valid name — `from cv2.datasets import none` raises ImportError
# at startup. Kept commented out for review; delete once confirmed unused.
# from cv2.datasets import none

os.environ["OPENCV_VIDEOIO_PRIORITY"] = "GTK"


class IDOLTracker:
    """Webcam object tracker.

    Combines YOLOv8 detection with BoT-SORT IDs, Kalman-filter smoothing of
    target centres, and short-horizon position prediction for targets that
    temporarily drop out of detection. Results are rendered live in an
    OpenCV window with keyboard/mouse controls.
    """

    def __init__(self, model_path='yolov8s.pt'):
        """Load YOLO weights and initialise all tracking state.

        Args:
            model_path: path to the .pt weights file to load.
        """
        # BUG FIX: torch.device('cuda:0') never raises even when CUDA is
        # unavailable, so the original try/except always selected CUDA and
        # the CPU fallback branch was dead code. Query availability
        # explicitly instead.
        if torch.cuda.is_available():
            self.device = torch.device('cuda:0')
            print(f"使用设备: {self.device}")
        else:
            self.device = torch.device('cpu')
            print(f"CUDA不可用，使用CPU: {self.device}")

        # Load the model and move it onto the chosen device.
        self.model = YOLO(model_path)
        self.model.model.to(self.device)
        print(f"模型加载到设备: {self.device}")

        # FPS bookkeeping: timestamp of the previous frame plus a sliding
        # window of per-frame FPS samples used for smoothing.
        self.prev_time = time.time()
        self.fps = 0
        self.fps_list = []

        # Per-class low-confidence history used by save_missed_pics
        # (indexed by class id; supports up to 99 classes).
        self.conf_list = [[] for _ in range(99)]
        # Most recent raw camera frame (used by the mouse capture callback).
        self.current_frame = None

        self.kalman = KalmanFilterTracker(dt=0.1,
                                          process_noise=5.0,
                                          measurement_noise=0.001,
                                          response_speed=70.0,
                                          history=10)

        self.center_history = {}        # {cls: [(x1, y1), (x2, y2), ...]}
        self.box_dimensions = {}        # {cls: (width, height)} of the last box
        self.last_detection_frame = {}  # {cls: frame index when last detected}
        self.current_frame_count = 0    # frames processed so far; starts at 0

    def draw_center_trail(self, annotated_frame, cls, current_center, length=5, color=(255, 255, 0)):
        """Draw the target's centre point and its recent motion trail.

        Args:
            annotated_frame: image to draw on (modified in place).
            cls: target class id, used as the history key.
            current_center: current centre point (x, y).
            length: number of historical points to keep and draw.
            color: BGR colour for both trail and centre dot.
        """
        trail = self.center_history.setdefault(cls, [])
        trail.append(current_center)
        # Keep only the most recent `length` points.
        if len(trail) > length:
            del trail[:-length]

        # Connect consecutive history points with line segments.
        for prev, cur in zip(trail, trail[1:]):
            cv2.line(annotated_frame,
                     (int(prev[0]), int(prev[1])),
                     (int(cur[0]), int(cur[1])),
                     color,
                     2)

        # Draw the current centre as a filled dot.
        cv2.circle(annotated_frame,
                   (int(current_center[0]), int(current_center[1])),
                   5,
                   color,
                   -1)

    def calculate_fps(self):
        """Compute a smoothed FPS value from inter-frame wall-clock time.

        Returns:
            FPS averaged over the last (up to) 10 frames; also stored in
            self.fps.
        """
        current_time = time.time()
        frame_time = current_time - self.prev_time
        self.prev_time = current_time

        # Guard against division by zero on extremely fast consecutive calls.
        if frame_time == 0:
            frame_time = 0.001

        current_fps = 1.0 / frame_time

        # Sliding average over the most recent 10 samples.
        self.fps_list.append(current_fps)
        if len(self.fps_list) > 10:
            self.fps_list.pop(0)

        self.fps = sum(self.fps_list) / len(self.fps_list)
        return self.fps

    def draw_fps_info(self, frame, fps):
        """Overlay FPS and compute-device labels onto the frame.

        Returns the same frame object (drawn in place).
        """
        fps_text = f"FPS: {fps:.1f}"
        cv2.putText(frame, fps_text, (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        device_text = f"Device: {'GPU' if self.device.type == 'cuda' else 'CPU'}"
        cv2.putText(frame, device_text, (10, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        return frame

    def draw_all_targets(self, cls, id, conf, x1, x2, y1, y2, center, draw_center, annotated_frame, velocity, acceleration):
        """Draw a labelled bounding box plus velocity/acceleration overlays.

        NOTE(review): the `id` parameter shadows the builtin; the name is
        kept for backward compatibility with existing callers.
        """
        # Class 1 is rendered as "temari", everything else as "kotone".
        if cls == 1:
            box_color = (206, 160, 79)
            label = f'temari:{conf:.2f} id:{id}'
        else:
            box_color = (86, 211, 250)
            label = f'kotone:{conf:.2f} id:{id}'
        cv2.rectangle(annotated_frame, (int(x1), int(y1)), (int(x2), int(y2)), box_color, 2)
        cv2.putText(annotated_frame, label, (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        # "TRK" badge in the top-right corner marks tracking mode.
        cv2.putText(annotated_frame, 'TRK', (1230, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
        cv2.putText(annotated_frame, f'V:{velocity}', (int(x1), int(y1) - 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        cv2.putText(annotated_frame, f'A:{acceleration}', (int(x1), int(y1) - 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        if draw_center:
            cv2.putText(annotated_frame, str(center), (int(x1), int(y1) - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
            cv2.circle(annotated_frame, center, 5, (0, 0, 255), -1)

    def save_missed_pics(self, conf, cls, raw_frame):
        """Accumulate low-confidence detections and snapshot persistent ones.

        Once a class has produced more than 7 consecutive detections with
        conf <= 0.7, the raw frame is written to missed_pics/ (presumably
        for later dataset augmentation — TODO confirm) and the accumulator
        resets. Any high-confidence detection also resets the accumulator.
        """
        if conf <= 0.7:
            self.conf_list[cls].append(conf)
            if len(self.conf_list[cls]) > 7:
                timestamp = time.strftime("%Y%m%d_%H%M%S")
                ave_conf = sum(self.conf_list[cls]) / len(self.conf_list[cls])
                # Make sure the output directory exists before writing.
                os.makedirs("missed_pics", exist_ok=True)
                filename = f"missed_pics/tracking_capture_{timestamp}_{ave_conf:.2f}.jpg"
                cv2.imwrite(filename, raw_frame)
                # BUG FIX: the original printed the literal "(unknown)"
                # instead of the saved path.
                print(f"保存低置信度图片: {filename}, 置信度: {ave_conf:.2f}")
                self.conf_list[cls] = []
        else:
            self.conf_list[cls] = []

    def mouse_callback(self, event, x, y, flags, param):
        """Save the current raw frame on left mouse click."""
        if event == cv2.EVENT_LBUTTONDOWN and self.current_frame is not None:
            # Make sure the output directory exists before writing.
            os.makedirs('captured_photos', exist_ok=True)
            time_stamp = int(time.time())
            cv2.imwrite(f'captured_photos/pic_{time_stamp}.jpg', self.current_frame)
            print(f'照片保存为captured_photos/pic_{time_stamp}.jpg')

    def _predict_lost_targets(self, annotated_frame, detected_classes, max_prediction_frames=30):
        """Draw Kalman-predicted boxes for classes missing from this frame.

        Prediction runs for at most `max_prediction_frames` frames after the
        last detection; beyond that the class's Kalman state is reset so
        stale boxes do not linger on screen.
        """
        for cls in self.kalman.get_active_classes():
            if cls in detected_classes or cls not in self.last_detection_frame:
                continue
            frames_lost = self.current_frame_count - self.last_detection_frame[cls]

            if frames_lost <= max_prediction_frames and cls in self.box_dimensions:
                try:
                    final_center = self.kalman.predict(cls)

                    # Rebuild a box around the predicted centre using the
                    # last observed dimensions for this class.
                    width, height = self.box_dimensions[cls]
                    x1 = final_center[0] - width // 2
                    y1 = final_center[1] - height // 2
                    x2 = final_center[0] + width // 2
                    y2 = final_center[1] + height // 2

                    # Fade the box colour the longer the target stays lost.
                    alpha = max(0.3, 1.0 - (frames_lost / max_prediction_frames))
                    color_value = int(128 * alpha)
                    if cls == 1:
                        cv2.rectangle(annotated_frame, (int(x1), int(y1)), (int(x2), int(y2)), (206, color_value, color_value), 2)
                    else:
                        cv2.rectangle(annotated_frame, (int(x1), int(y1)), (int(x2), int(y2)), (color_value, 211, 250), 2)

                    # Red trail marks predicted (not detected) positions.
                    self.draw_center_trail(annotated_frame, cls, final_center, length=5, color=(0, 0, 255))

                    pred_text = f"PRED (frames: {frames_lost})"
                    cv2.putText(annotated_frame, pred_text, (int(x1), int(y1) - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                except Exception as e:
                    print(f"预测时出错: {e}")
            elif frames_lost > max_prediction_frames:
                # Give up on this target and clear its Kalman state.
                self.kalman.reset_tracking(cls)

    def run(self, camera_id=0, window_name=""):
        """Main capture/detect/track loop.

        Keys: q/ESC quit, s/space save a screenshot, 9/3 raise/lower manual
        exposure, 0 re-enable auto exposure. Left mouse click saves the raw
        frame (see mouse_callback).
        """
        cap = cv2.VideoCapture(camera_id, cv2.CAP_DSHOW)
        exp = -8  # manual exposure value last applied (DirectShow units)
        if not cap.isOpened():
            return

        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0)
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_TOPMOST, 1)
        cv2.resizeWindow(window_name, 1280, 720)

        cv2.setMouseCallback(window_name, self.mouse_callback)

        # Give the camera a moment to settle before the first read.
        time.sleep(0.5)

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                self.current_frame = frame.copy()
                fps = self.calculate_fps()
                annotated_frame = self.draw_fps_info(frame, fps)

                self.current_frame_count += 1

                with torch.no_grad():  # inference only; skip autograd bookkeeping
                    results = self.model.track(
                        source=frame,
                        tracker="botsort.yaml",    # BoT-SORT tracker config
                        conf=0.5,                  # confidence threshold
                        iou=0.5,                   # IOU threshold
                        imgsz=640,                 # inference input size
                        # BUG FIX: the original hard-coded device=0 and
                        # half=True, which fails on CPU-only machines even
                        # though __init__ falls back to CPU. Follow the
                        # device selected there; FP16 only makes sense on GPU.
                        half=(self.device.type == 'cuda'),
                        device=self.device,
                        persist=True,              # keep tracking IDs across frames
                        verbose=False,
                        rect=True,
                    )

                # Classes seen in this frame (others may need prediction).
                detected_classes = set()

                # Track IDs are only present once the tracker has locked on.
                if results[0].boxes.id is not None:

                    for box in results[0].boxes:
                        if box.id is not None:
                            id = box.id.cpu().numpy().astype(int)[0]
                            cls = box.cls.cpu().numpy().astype(int)[0]
                            conf = box.conf.cpu().numpy().astype(float)[0]
                            x1, y1, x2, y2 = np.array(box.xyxy.cpu()).astype(int)[0]
                            width = x2 - x1
                            height = y2 - y1
                            center = (int((x1 + x2) / 2), int((y1 + y2) / 2))

                            detected_classes.add(cls)

                            # Remember box size and last-seen frame so the
                            # prediction path can rebuild a box later.
                            self.box_dimensions[cls] = (width, height)
                            self.last_detection_frame[cls] = self.current_frame_count

                            # Kalman-smooth the measured centre.
                            updated_center, velocity, acceleration, norm_velocity, norm_acceleration = self.kalman.update(cls, center)

                            final_center = updated_center

                            # Draw box, labels and the centre trail.
                            self.draw_all_targets(cls, id, conf, x1, x2, y1, y2, final_center, False, annotated_frame, norm_velocity, norm_acceleration)
                            self.draw_center_trail(annotated_frame, cls, final_center, length=5, color=(255, 255, 0))

                            self.save_missed_pics(conf, cls, self.current_frame)

                # Handle targets that were tracked before but are missing now.
                self._predict_lost_targets(annotated_frame, detected_classes, max_prediction_frames=30)

                key = cv2.waitKey(1) & 0xFF
                if key == ord('q') or key == 27:  # q or ESC quits
                    break

                elif key == ord('s') or key == ord(' '):  # save a screenshot
                    timestamp = time.strftime("%Y%m%d_%H%M%S")
                    os.makedirs("missed_pics", exist_ok=True)
                    filename = f"missed_pics/tracking_capture_{timestamp}.jpg"
                    cv2.imwrite(filename, self.current_frame)
                    # BUG FIX: the original printed the literal "(unknown)"
                    # instead of the saved path.
                    print(f"截图已保存: {filename}")
                    cv2.putText(annotated_frame, f"Captured: {timestamp}!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                    cv2.imshow(window_name, annotated_frame)
                    time.sleep(0.15)

                elif key == ord('9'):
                    exp = min(exp + 1, -6)  # clamp to the camera's usable range
                    print(f'{f"曝光时间增加为:{exp}":<20}', end='\r', flush=True)  # fixed width overwrites the line
                    cap.set(cv2.CAP_PROP_EXPOSURE, exp)

                elif key == ord('3'):
                    exp = max(exp - 1, -10)  # clamp to the camera's usable range
                    print(f'{f"曝光时间减小为:{exp}":<20}', end='\r', flush=True)  # fixed width overwrites the line
                    cap.set(cv2.CAP_PROP_EXPOSURE, exp)

                elif key == ord('0'):
                    print(f'{f"自动曝光已启用":<20}', end='\r', flush=True)  # fixed width overwrites the line
                    cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0)

                cv2.imshow(window_name, annotated_frame)
        except KeyboardInterrupt:
            print("用户中断")
        finally:
            # Restore auto exposure and release camera/window resources.
            cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0)
            cap.release()
            cv2.destroyAllWindows()


def main():
    """Let the user pick a trained run from runs/detect, validate its best
    weights, then start live tracking on camera 0.

    An empty input selects the last (most recent, by sorted name) run.
    """
    runs_dir = 'E:/Py/visual/runs/detect'
    name_list = []
    # show_all_val(runs_dir)
    if os.path.isdir(runs_dir):
        for count, name in enumerate(sorted(os.listdir(runs_dir))):
            name_list.append(name)
            print(count, name)

    # BUG FIX: the original crashed with IndexError when the runs directory
    # was missing/empty, and with ValueError on non-numeric input.
    if not name_list:
        print(f"未找到任何模型目录: {runs_dir}")
        return

    num = input('调用哪个模型？')
    if num == '':
        num = len(name_list) - 1  # default to the most recent run
    try:
        selected = name_list[int(num)]
    except (ValueError, IndexError):
        print(f"无效的选择: {num}")
        return

    # BUG FIX: build the path from runs_dir instead of re-hardcoding the
    # same directory string a second time.
    path = os.path.join(runs_dir, selected, 'weights', 'best.pt')
    print('模型名称:', selected)
    tracker = IDOLTracker(model_path=path)
    validate_model(path)
    tracker.run(camera_id=0, window_name="idol tracker")

if __name__ == "__main__":
    main()