import cv2 
from ultralytics import YOLO 
import serial 
import torch 
import os 
import numpy as np 
from filterpy.kalman  import UnscentedKalmanFilter 
#from filterpy.ptsampler  import MerweScaledSigmaPoints
from filterpy.kalman  import MerweScaledSigmaPoints 
import threading 
import time 
 
# Serial-port initialization (Jetson UART /dev/ttyTHS1 at 115200 baud).
# NOTE: serial.Serial() raises on failure, so the check below is mostly
# informational; `is_open` replaces the isOpen() call deprecated in pyserial 3.0.
ser = serial.Serial("/dev/ttyTHS1", 115200)

if not ser.is_open:
    print("serial open failed")
else:
    print("serial open success: ")
print(ser)
 
# Load the YOLO model (TensorRT .engine export; runs on the Jetson GPU).
model = YOLO("/home/jetson/ultralytics/ultralytics/yahboom_demo/mode/4/best.engine")

# Inference thresholds: deliberately permissive IoU/confidence, and at most
# one detection per frame since only a single target is tracked.
iou_threshold = 0.1
conf_threshold = 0.1
max_det = 1
 
# Open the camera (device 0) and request 640x480 MJPG frames.
cap = cv2.VideoCapture(0)
win_width = 640
win_hight = 480  # (sic) name kept — referenced throughout the display code
# CAP_PROP_FOURCC replaces the magic property id 6 used previously.
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
cap.set(cv2.CAP_PROP_FRAME_WIDTH, win_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, win_hight)

# Frame size as actually negotiated by the driver (may differ from the request).
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FPS)) if False else int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# The driver-reported FPS is not trusted here; a fixed 100 Hz is used so the
# UKF's dt (1/fps below) matches the serial thread's 10 ms transmit period.
fps = 100

# Define the codec and create a VideoWriter object to output the processed video.
# NOTE(review): `out` is never written to in the main loop and out.release()
# is commented out at the bottom — recording appears intentionally disabled.
output_path = "/home/jetson/ultralytics/ultralytics/output/06.orange_camera_usb.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can use 'XVID' or 'mp4v' depending on your platform
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
 
# State-transition model for the unscented Kalman filter.
def fx(x, dt):
    """Constant-velocity state transition.

    State layout is [x_pos, x_vel, y_pos, y_vel]: each position advances
    by its velocity times dt, velocities are carried over unchanged.
    """
    transition = np.array([
        [1, dt, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, dt],
        [0, 0, 0, 1],
    ])
    return transition @ x
 
def hx(x):
    """Measurement model: only the position components are observable."""
    position_only = np.asarray([x[0], x[2]])
    return position_only
 
# Initialize the UKF: 4-D state [x, vx, y, vy], 2-D measurement [x, y].
points = MerweScaledSigmaPoints(4, alpha=.001, beta=2., kappa=-1) 
ukf = UnscentedKalmanFilter(dim_x=4, dim_z=2, dt=1/fps, fx=fx, hx=hx, points=points) 
ukf.x = np.array([0,  0, 0, 0])  # initial state (origin, at rest)
ukf.P *= 1000.  # large initial covariance: the state is unknown at startup
ukf.R = np.diag([0.1,  0.1])  # measurement-noise covariance
ukf.Q = np.eye(4)  * 0.01  # process-noise covariance

# Shared state between the main loop and the serial thread, guarded by `lock`.
# target_position = [x1, x2, y1, y2, count]; count: 1=detected, 2=UKF-predicted, 0=lost.
target_position = [0, 0,0,0,0] 
lock = threading.Lock() 
loss_count=0  # consecutive frames without a detection
track=0  # 1 while actively tracking a detection, 0 while coasting/lost
 
# 线程函数：以100Hz的频率通过串口发送目标位置坐标 
# Worker thread: transmit the latest target coordinates over serial at ~100 Hz.
def send_position():
    """Continuously write the shared target position to the serial port.

    Reads ``target_position`` ([x1, x2, y1, y2, count], normalized coords)
    under ``lock`` and sends it as an ASCII tuple terminated by CRLF.
    Runs forever; intended to be started as a daemon thread.
    """
    def _clamp(v):
        # Keep values inside [0, 0.9999] so each %.4f field stays fixed-width
        # on the receiver (never renders as "1.0000" or a negative sign).
        return min(max(v, 0), 0.99990)

    while True:
        # monotonic clock: immune to wall-clock jumps when measuring elapsed time
        start = time.monotonic()
        with lock:
            x1, x2, y1, y2, count = target_position
        x1, x2, y1, y2 = _clamp(x1), _clamp(x2), _clamp(y1), _clamp(y2)
        ser.write(f"({x1:.4f},{x2:.4f},{y1:.4f},{y2:.4f},{count})\r\n".encode())
        # Pace the loop to a 10 ms period, accounting for the work done above.
        remaining = 0.01 - (time.monotonic() - start)
        if remaining > 0:
            time.sleep(remaining)
 
# Launch the serial transmitter as a daemon thread so it is torn down
# automatically when the main thread exits.
thread = threading.Thread(target=send_position, daemon=True)
thread.start()
 
# Main loop: detect the target per frame, fuse with the UKF, publish the
# shared target_position for the serial thread, and render a debug overlay.
while cap.isOpened():
    # Read a frame from the camera.
    success, frame = cap.read()
    if not success:
        # Camera disconnected or stream ended.
        break

    # Run YOLO inference on the frame (at most one box, see max_det above).
    results = model(frame, iou=iou_threshold, conf=conf_threshold, max_det=max_det)

    # Annotated copy of the frame plus the raw detections.
    annotated_frame = results[0].plot()
    boxes = results[0].boxes

    # Normalized [x1, y1, x2, y2] boxes and confidences, moved to the CPU.
    xyxy_cpu = boxes.xyxyn.cpu()
    conf_cpu = boxes.conf.cpu()

    target_count = conf_cpu.numel()
    x1, y1, x2, y2 = 0, 0, 0, 0
    center_x, center_y = 0, 0  # target center, normalized coordinates
    print(conf_cpu)
    if target_count > 0:
        # Keep only the highest-confidence detection.
        max_conf, max_idx = torch.max(conf_cpu, dim=0)
        x1, y1, x2, y2 = xyxy_cpu.tolist()[max_idx]

        # Box center in normalized coordinates (used as the UKF measurement).
        center_x = (x1 + x2) / 2
        center_y = (y1 + y2) / 2

        # Clamp each coordinate into [0, 0.9999] so the %.4f serial fields
        # stay fixed-width on the receiver.
        x1 = min(max(x1, 0), 0.99990)
        x2 = min(max(x2, 0), 0.99990)
        y1 = min(max(y1, 0), 0.99990)
        y2 = min(max(y2, 0), 0.99990)

        print(f"xyxy: x1={x1:.4f}, y1={y1:.4f}, x2={x2:.4f}, y2={y2:.4f}")

        # Fresh measurement: reset the loss counter and correct the UKF.
        loss_count = 0
        track = 1
        measurement = np.array([center_x, center_y])
        ukf.predict()
        ukf.update(measurement)
        predicted_center_x, _, predicted_center_y, _ = ukf.x

        with lock:
            # count=1: position comes from a live detection.
            target_position = [x1, x2, y1, y2, 1]

    else:
        # No detection: coast on the UKF prediction for up to 10 frames,
        # then declare the target lost.
        loss_count = loss_count + 1
        track = 0
        if loss_count < 10:
            ukf.predict()
            predicted_center_x, _, predicted_center_y, _ = ukf.x
            with lock:
                # count=2: position is a UKF prediction, not a measurement.
                target_position = [predicted_center_x, predicted_center_y, 0, 0, 2]
        else:
            with lock:
                # count=0: target lost.
                target_position = [0, 0, 0, 0, 0]
        print(loss_count)

    # Debug overlay: vertical reference lines at fixed fractions of the width
    # and an aim point near the frame center.
    cv2.line(annotated_frame, (int(0.25 * win_width), 0), (int(0.25 * win_width), win_hight), (255, 0, 0), 2, cv2.LINE_AA)
    cv2.line(annotated_frame, (int(0.35 * win_width), 0), (int(0.35 * win_width), win_hight), (0, 0, 255), 2, cv2.LINE_AA)
    cv2.line(annotated_frame, (int(0.5 * win_width), 0), (int(0.5 * win_width), win_hight), (0, 255, 0), 2, cv2.LINE_AA)
    cv2.line(annotated_frame, (int(0.65 * win_width), 0), (int(0.65 * win_width), win_hight), (0, 0, 255), 2, cv2.LINE_AA)
    cv2.line(annotated_frame, (int(0.75 * win_width), 0), (int(0.75 * win_width), win_hight), (255, 0, 0), 2, cv2.LINE_AA)
    cv2.circle(annotated_frame, (int(win_width * 0.5), int(win_hight * 0.48)), radius=2, color=(0, 0, 255), thickness=-1)

    # Draw the UKF prediction while coasting, the raw detection center otherwise.
    # NOTE(review): predicted_center_x/y go stale once loss_count >= 10 (the
    # lost branch no longer updates them) — confirm the stale marker is intended.
    x, y = predicted_center_x, predicted_center_y
    if track == 0:
        cv2.circle(annotated_frame, (int(x * win_width), int(y * win_hight)), radius=4, color=(0, 255, 0), thickness=2)
    else:
        cv2.circle(annotated_frame, (int(center_x * win_width), int(center_y * win_hight)), radius=2, color=(255, 0, 0), thickness=2)

    # Display the annotated frame.
    cv2.imshow("YOLO  + UKF Tracking", cv2.resize(annotated_frame, (win_width, win_hight)))

    # Break the loop if 'q' is pressed.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
 
# Release the video capture and close the display window.
cap.release()  
# out.release()  # intentionally disabled: the writer is never written to
cv2.destroyAllWindows()  
