# -*- coding: utf-8 -*-
import math
import os
import signal
import struct
import threading
import time
from collections import deque
from queue import Empty, Queue

import cv2
import numpy as np
import serial
from PIL import Image, ImageDraw, ImageFont
from pyorbbecsdk import Pipeline, Config, OBFormat, OBSensorType
from ultralytics import YOLO

# Shared state between the camera / inference / display threads.
exit_flag = False                # set by the SIGINT handler; every loop polls it
selection_active = False         # True while the user is dragging a mouse ROI
selection_coords = (0, 0, 0, 0)  # (anchor_x, anchor_y, cur_x, cur_y) of the drag
roi_queue = Queue(maxsize=1)     # latest user-selected ROI, consumed by inference

# Serial protocol framing bytes: packet = HEADER | payload | FOOTER
HEADER = 0xAA
FOOTER = 0x55

def signal_handler(signum, stack):
    """SIGINT handler: raise the global exit flag so all worker loops stop."""
    global exit_flag
    exit_flag = True
signal.signal(signal.SIGINT, signal_handler)

def mouse_callback(event, x, y, flags, param):
    """OpenCV mouse callback implementing drag-to-select ROI.

    Button-down records the anchor corner, mouse-move updates the live
    corner, and button-up normalizes the rectangle to (x1, y1, x2, y2)
    with (x1, y1) top-left and hands it to the inference thread.
    """
    global selection_active, selection_coords
    if event == cv2.EVENT_LBUTTONDOWN:
        selection_active = True
        selection_coords = (x, y, x, y)
    elif event == cv2.EVENT_MOUSEMOVE and selection_active:
        selection_coords = (selection_coords[0], selection_coords[1], x, y)
    elif event == cv2.EVENT_LBUTTONUP:
        selection_active = False
        # Normalize corners so the rectangle is valid regardless of drag direction.
        x1 = min(selection_coords[0], x)
        y1 = min(selection_coords[1], y)
        x2 = max(selection_coords[0], x)
        y2 = max(selection_coords[1], y)
        # Bug fix: previously a full queue silently dropped the NEW selection.
        # Discard any stale ROI instead so the latest user selection always wins.
        while not roi_queue.empty():
            try:
                roi_queue.get_nowait()
            except Empty:
                break
        roi_queue.put((x1, y1, x2, y2))

# Orbbec stream setup: (width, height, pixel format, fps) per stream.
# NOTE(review): only the "depth"/"color" entries are read (CameraThread);
# "sync" is never consumed in this file — confirm whether it is still needed.
CAMERA_CONFIG = {
    "depth": (640, 480, OBFormat.Y16, 30),
    "color": (640, 480, OBFormat.RGB, 30),
    "sync": False
}

CAMERA_MOUNT_HEIGHT = 1.5             # camera mounting height above ground (m)
CAMERA_TILT_ANGLE = math.radians(30)  # upward tilt of the camera
BASKET_HEIGHT = 2.4                   # basket rim height (m)
ROBOT_RELEASE_HEIGHT = 0.15           # ball release height of the launcher (m)
GRAVITY = 9.81                        # gravitational acceleration (m/s^2)

# Horizontal distance (m) -> launcher speed lookup; zones are half-open
# [min, max) and the first match wins (see InferenceThread.get_speed_zone).
# NOTE(review): distances >= 5.0 match no zone, so get_speed_zone falls back
# to the LAST entry (600, the close-range speed) — confirm this is intended.
SPEED_ZONES = [
    {'min': 4.0, 'max': 5.0, 'speed': 800},
    {'min': 3.0, 'max': 4.0, 'speed': 750},
    {'min': 2.0, 'max': 3.0, 'speed': 700},
    {'min': 1.0, 'max': 2.0, 'speed': 650},
    {'min': 0.0, 'max': 1.0, 'speed': 600}
]

# Color-camera intrinsic matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]] from calibration.
CAMERA_MATRIX = np.array([
    [451.872620, 0.000000, 338.310490],
    [0.000000, 454.865469, 249.709555],
    [0.000000, 0.000000, 1.000000]
], dtype=np.float32)

# Lens distortion coefficients (k1, k2, p1, p2, k3).
DIST_COEFFS = np.array([0.064292, -0.077024, 0.000439, 0.010203, 0.000000], dtype=np.float32)
# Precomputed undistortion lookup maps for 640x480 frames (consumed by cv2.remap).
map1, map2 = cv2.initUndistortRectifyMap(CAMERA_MATRIX, DIST_COEFFS, None, CAMERA_MATRIX, (640, 480), cv2.CV_16SC2)

SERIAL_PORT = '/dev/ttyTHS1'  # UART device carrying the pitch/yaw/speed packets
BAUD_RATE = 115200

FONT_PATH = "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"  # TTF for PIL overlay text

class MovingAverageFilter:
    """Sliding-window smoother: returns the mean of the last N samples.

    Used to damp jitter in the pitch/yaw/speed commands before they are
    sent over serial.
    """

    def __init__(self, window_size=5):
        self.window_size = window_size
        # deque(maxlen=...) evicts the oldest sample in O(1); the previous
        # list + pop(0) implementation shifted the whole list (O(n)) and
        # carried a dead `else value` fallback that could never trigger
        # (the window is never empty right after an append).
        self.values = deque(maxlen=window_size)

    def update(self, value):
        """Add *value* to the window and return the current moving average."""
        self.values.append(value)
        return sum(self.values) / len(self.values)

frame_queue = Queue(maxsize=5)   # camera -> inference: (bgr_image, depth_uint16)
result_queue = Queue(maxsize=5)  # inference -> display: (bgr_image, detection_dict | None)

class SerialManager:
    """Thread-safe serial transmitter for (pitch, yaw, speed) commands.

    Raw values are smoothed with moving-average filters and only sent when
    a channel changed by more than its threshold AND at least
    ``send_interval`` seconds elapsed since the last packet. After more
    than 10 consecutive write failures the manager stops transmitting.
    """

    def __init__(self):
        self.ser = None               # serial.Serial handle, set by initialize()
        self.lock = threading.Lock()  # serializes safe_send across threads
        self.last_send_time = 0
        self.send_interval = 0.1      # minimum seconds between packets
        self.error_count = 0          # consecutive send failures
        self.filters = {
            'pitch': MovingAverageFilter(),
            'yaw': MovingAverageFilter(),
            'speed': MovingAverageFilter()
        }
        # Last values actually transmitted (None until the first send).
        self.last_sent_values = {
            'pitch': None,
            'yaw': None,
            'speed': None
        }
        # Minimum per-channel change that justifies a new packet.
        self.error_thresholds = {
            'pitch': 1,   # degrees
            'yaw': 1,     # degrees
            'speed': 50   # motor speed units
        }

    def initialize(self):
        """Open the serial port; return True on success, False otherwise."""
        try:
            self.ser = serial.Serial(SERIAL_PORT, BAUD_RATE, timeout=0.2)
            print("Serial port initialized successfully")
            return True
        except Exception as e:
            print(f"Serial init error: {e}")
            return False

    def build_data_packet(self, pitch, yaw, speed):
        """Build the binary packet: header(1B) | pitch f32 | yaw f32 | speed u16 | footer(1B).

        All multi-byte fields are little-endian. Returns None on packing
        failure (e.g. a speed outside the unsigned-16-bit range).
        """
        try:
            speed_int = int(round(speed))
            # Single pack call replaces the original three-part concatenation;
            # '<' disables alignment padding, so the 12 bytes are identical.
            return struct.pack('<BffHB', HEADER, pitch, yaw, speed_int, FOOTER)
        except Exception as e:
            print(f"Packet build error: {e}")
            return None

    def safe_send(self, raw_pitch, raw_yaw, raw_speed):
        """Filter the raw commands and transmit them if they changed enough."""
        with self.lock:
            if self.error_count > 10:
                return  # link considered dead; stop hammering the port
            if self.ser is None:
                # Robustness fix: previously this raised AttributeError on
                # write() and was miscounted as a transient send error.
                return

            # Smooth the raw commands.
            filtered_pitch = self.filters['pitch'].update(raw_pitch)
            filtered_yaw = self.filters['yaw'].update(raw_yaw)
            filtered_speed = self.filters['speed'].update(raw_speed)

            # Change detection against what was last put on the wire.
            send_required = False
            if self.last_sent_values['pitch'] is None:
                send_required = True  # first transmission
            else:
                delta_pitch = abs(filtered_pitch - self.last_sent_values['pitch'])
                delta_yaw = abs(filtered_yaw - self.last_sent_values['yaw'])
                delta_speed = abs(filtered_speed - self.last_sent_values['speed'])

                if (delta_pitch > self.error_thresholds['pitch'] or
                    delta_yaw > self.error_thresholds['yaw'] or
                    delta_speed > self.error_thresholds['speed']):
                    send_required = True

            current_time = time.time()
            time_ok = (current_time - self.last_send_time) >= self.send_interval

            if send_required and time_ok:
                try:
                    packet = self.build_data_packet(filtered_pitch, filtered_yaw, filtered_speed)
                    if packet is None:
                        return

                    self.ser.write(packet)

                    # Debug trace of what actually went out.
                    debug_str = (f"Sent: pitch={filtered_pitch:.1f}°, "
                                f"yaw={filtered_yaw:.1f}°, "
                                f"speed={int(filtered_speed)}")
                    print(debug_str)

                    # Successful write: reset failure counter, remember values.
                    self.last_send_time = current_time
                    self.error_count = 0
                    self.last_sent_values.update({
                        'pitch': filtered_pitch,
                        'yaw': filtered_yaw,
                        'speed': filtered_speed
                    })
                except Exception as e:
                    self.error_count += 1
                    print(f"Send error: {e}")

    def close(self):
        """Close the serial port if it is open."""
        if self.ser and self.ser.is_open:
            self.ser.close()

class CameraThread(threading.Thread):
    """Producer thread: streams color + depth frames from the Orbbec camera.

    Pushes (undistorted BGR image, raw uint16 depth map) pairs into
    frame_queue; a pair is silently dropped when the queue is full so the
    consumer never falls behind real time.
    """
    def __init__(self):
        super().__init__()
        # Configure both streams with the profiles from CAMERA_CONFIG and
        # start the pipeline immediately (in the constructor, not in run()).
        self.pipe = Pipeline()
        config = Config()
        depth_profile = self.pipe.get_stream_profile_list(OBSensorType.DEPTH_SENSOR).get_video_stream_profile(*CAMERA_CONFIG["depth"])
        color_profile = self.pipe.get_stream_profile_list(OBSensorType.COLOR_SENSOR).get_video_stream_profile(*CAMERA_CONFIG["color"])
        config.enable_stream(depth_profile)
        config.enable_stream(color_profile)
        self.pipe.start(config)
        
    def run(self):
        while not exit_flag:
            try:
                # Wait up to 500 ms for the next frame set.
                frames = self.pipe.wait_for_frames(500)
                if not frames:
                    continue
                color_frame = frames.get_color_frame()
                depth_frame = frames.get_depth_frame()
                if color_frame and depth_frame:
                    # Raw RGB buffer -> 480x640x3 array, then BGR for OpenCV.
                    color_img = np.frombuffer(color_frame.get_data(), dtype=np.uint8).reshape((480, 640, 3))
                    color_img = cv2.cvtColor(color_img, cv2.COLOR_RGB2BGR)
                    # Undistort with the precomputed remap tables (map1/map2).
                    color_img = cv2.remap(color_img, map1, map2, cv2.INTER_LINEAR)
                    # Depth stays raw uint16; the 0.001 scaling to meters
                    # happens in the inference thread.
                    depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16).reshape((480, 640))
                    if not frame_queue.full():
                        frame_queue.put((color_img, depth_data))
            except Exception as e:
                print(f"Camera error: {e}")
        self.pipe.stop()

class InferenceThread(threading.Thread):
    """Consumer thread: YOLO detection + ballistic solver.

    Pulls (color, depth) pairs from frame_queue, finds the target, converts
    the pixel + depth measurement into robot-base coordinates, solves launch
    pitch/yaw/speed and forwards them via SerialManager. Annotated results
    go to result_queue for the display thread.
    """

    def __init__(self, serial_mgr):
        super().__init__()
        self.model = YOLO('/home/jetson/pyorbbecsdk/train/best1.engine', task='detect')
        self.serial_mgr = serial_mgr
        self.last_valid_data = None  # last good detection, resent while the target is lost
        self.current_roi = None      # optional user ROI (x1, y1, x2, y2)
        self.roi_lock = threading.Lock()
        self.pil_font = ImageFont.truetype(FONT_PATH, 24) if os.path.exists(FONT_PATH) else ImageFont.load_default()

    def put_text(self, img_np, text, position):
        """Draw text on a BGR image via PIL (handles non-ASCII glyphs like '°')."""
        img_pil = Image.fromarray(cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(img_pil)
        draw.text(position, text, font=self.pil_font, fill=(255,0,0))
        return cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)

    def coordinate_transform(self, X_cam, Y_cam, Z_cam):
        """Rotate camera-frame coordinates by the mount tilt into the robot base frame.

        Returns (X_robot, Y_robot, Z_robot): lateral offset, height (with the
        camera mount height added), and horizontal range.
        """
        theta = CAMERA_TILT_ANGLE
        X_robot = X_cam  # lateral axis is unaffected by the pitch tilt
        # Height component, offset by where the camera is mounted.
        Y_robot = Y_cam * math.cos(theta) + Z_cam * math.sin(theta) + CAMERA_MOUNT_HEIGHT
        # Horizontal range after removing the tilt component.
        Z_robot = Z_cam * math.cos(theta) - Y_cam * math.sin(theta)
        return X_robot, Y_robot, Z_robot

    def calculate_angles(self, x, z):
        """Yaw (degrees) toward a target at lateral offset x and range z; 0.0 if z <= 0."""
        if z <= 0:
            return 0.0
        return math.degrees(math.atan2(x, z))

    def calculate_pitch(self, delta_h, horizontal_dist, speed):
        """Solve the projectile launch angle (radians) for the given geometry.

        Solves the quadratic in tan(theta) from the ballistic trajectory
        equation and returns the flatter valid solution (< 80°), or None
        when the target is unreachable at this launch speed.
        """
        try:
            A = (GRAVITY * horizontal_dist**2) / (2 * speed**2)
            B = -horizontal_dist
            C = delta_h + A
            discriminant = B**2 - 4 * A * C
            if discriminant < 0:
                return None  # out of range at this speed
            tan_theta1 = (-B + math.sqrt(discriminant)) / (2 * A)
            tan_theta2 = (-B - math.sqrt(discriminant)) / (2 * A)
            valid_angles = [math.atan(t) for t in [tan_theta1, tan_theta2]
                            if abs(t) < math.tan(math.radians(80))]
            return min(valid_angles) if valid_angles else None
        except (ValueError, ZeroDivisionError):
            # Bug fix: this was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit; only math failures (zero speed
            # or distance, sqrt domain) are expected here.
            return None

    def get_speed_zone(self, distance):
        """Map horizontal distance (m) to a launch speed using SPEED_ZONES ([min, max))."""
        for zone in SPEED_ZONES:
            if zone['min'] <= distance < zone['max']:
                return zone['speed']
        return SPEED_ZONES[-1]['speed']

    def run(self):
        while not exit_flag:
            # Adopt the latest user-drawn ROI, if any.
            try:
                with self.roi_lock:
                    self.current_roi = roi_queue.get_nowait()
            except Empty:
                pass

            # Bug fix: the original empty()/get() pattern busy-spun at 100%
            # CPU while idle; a blocking get with a short timeout lets the
            # loop sleep yet still notice exit_flag promptly.
            try:
                color_img, depth_data = frame_queue.get(timeout=0.1)
            except Empty:
                continue

            results = self.model.predict(color_img, conf=0.7, imgsz=640, verbose=False)
            best_detection = None
            if results[0].boxes:
                for box in results[0].boxes:
                    if box.conf > 0.7 and box.cls == 0:
                        x1, y1 = int(box.xyxy[0][0]), int(box.xyxy[0][1])
                        x2, y2 = int(box.xyxy[0][2]), int(box.xyxy[0][3])
                        cx = (x1 + x2) // 2
                        cy = (y1 + y2) // 2

                        # Skip detections outside the user ROI (when one is set).
                        if self.current_roi and not (self.current_roi[0] <= cx <= self.current_roi[2] and
                                                     self.current_roi[1] <= cy <= self.current_roi[3]):
                            continue

                        # Median depth over a small window around the box
                        # center, ignoring zero (invalid) readings.
                        depth_roi = depth_data[max(0,cy-5):min(cy+5,480), max(0,cx-5):min(cx+5,640)]
                        valid_depths = depth_roi[depth_roi > 0]
                        if valid_depths.size == 0:
                            continue
                        Z_cam = np.median(valid_depths) * 0.001  # mm -> m
                        if Z_cam < 0.3:
                            continue  # ignore targets closer than 0.3 m

                        # Back-project pixel -> camera frame, then to robot base.
                        X_cam = (cx - CAMERA_MATRIX[0,2]) * Z_cam / CAMERA_MATRIX[0,0]
                        Y_cam = (cy - CAMERA_MATRIX[1,2]) * Z_cam / CAMERA_MATRIX[1,1]  # image y grows downward
                        X_robot, Y_robot, Z_robot = self.coordinate_transform(X_cam, Y_cam, Z_cam)

                        horizontal_dist = Z_robot  # Z axis is the horizontal range
                        delta_h = BASKET_HEIGHT - (ROBOT_RELEASE_HEIGHT + Y_robot)
                        speed = self.get_speed_zone(horizontal_dist)

                        pitch = self.calculate_pitch(delta_h, horizontal_dist, speed)
                        if pitch is None:
                            continue
                        yaw = self.calculate_angles(X_robot, Z_robot)

                        best_detection = {
                            'bbox': (x1, y1, x2, y2),
                            'params': (math.degrees(pitch), yaw, speed),
                            'center': (cx, cy),
                            'position': (X_robot, Y_robot, Z_robot)
                        }
                        break  # first valid box wins

            if best_detection:
                self.serial_mgr.safe_send(*best_detection['params'])
                self.last_valid_data = best_detection
            elif self.last_valid_data:
                # Target momentarily lost: keep steering toward the last fix.
                self.serial_mgr.safe_send(*self.last_valid_data['params'])

            if not result_queue.full():
                result_queue.put((color_img, best_detection))

def display_thread():
    """Main-thread UI loop: render annotated frames and handle ROI mouse selection."""
    win = "Basketball Tracking"
    cv2.namedWindow(win, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(win, 640, 480)
    cv2.setMouseCallback(win, mouse_callback)

    while not exit_flag:
        try:
            if not result_queue.empty():
                frame, det = result_queue.get()

                # Live rectangle while the user is dragging an ROI.
                if selection_active:
                    sx1, sy1, sx2, sy2 = selection_coords
                    cv2.rectangle(frame, (sx1, sy1), (sx2, sy2), (0, 255, 0), 2)

                if det:
                    # Bounding box, center dot, and a line from the principal point.
                    bx1, by1, bx2, by2 = det['bbox']
                    cv2.rectangle(frame, (bx1, by1), (bx2, by2), (0, 255, 0), 2)
                    center = det['center']
                    cv2.circle(frame, center, 5, (0, 0, 255), -1)
                    principal = (int(CAMERA_MATRIX[0, 2]), int(CAMERA_MATRIX[1, 2]))
                    cv2.line(frame, principal, center, (255, 0, 0), 2)

                    pitch_deg, yaw_deg, speed = det['params']
                    px, py, pz = det['position']
                    overlay = (f"Pitch: {pitch_deg:.1f}°\n"
                               f"Yaw: {yaw_deg:.1f}°\n"
                               f"Speed: {speed}\n"
                               f"Pos: ({px:.2f}, {py:.2f}, {pz:.2f})m")

                    # Borrow the inference thread's PIL helper for unicode text.
                    owner = next((t for t in threading.enumerate()
                                  if isinstance(t, InferenceThread)), None)
                    if owner:
                        frame = owner.put_text(frame, overlay, (20, 40))

                cv2.imshow(win, frame)
            cv2.waitKey(1)  # pump GUI events even when no frame arrived
        except Exception as e:
            print(f"Display error: {e}")

    cv2.destroyAllWindows()
    
    
 
if __name__ == "__main__":
    # The serial link is mandatory: bail out early if the port will not open.
    serial_mgr = SerialManager()
    if not serial_mgr.initialize():
        print("Failed to initialize serial port")
        exit(1)
    
    cam_thread = CameraThread()
    infer_thread = InferenceThread(serial_mgr)
    
    cam_thread.start()
    infer_thread.start()
    # The UI loop runs on the main thread; it returns once exit_flag is set
    # (Ctrl-C via the SIGINT handler).
    display_thread()
    
    # Ensure workers stop even if display_thread returned for another reason.
    exit_flag = True
    cam_thread.join()
    infer_thread.join()
    serial_mgr.close()
    print("Program exited cleanly")
