import sys
import os
import json
import time
import threading
import queue
import cv2
import numpy as np
import socket
import struct
import datetime
import psutil
import onnxruntime as ort
from queue import Queue
from pymycobot import MyCobot280Socket, MyCobot280
import serial
import crcmod
import socket
import pyaudio
import wave
import tempfile
import sounddevice as sd
import json
from collections import deque
from scipy.signal import savgol_filter
from scipy.interpolate import CubicSpline
import scipy.ndimage as ndi
import math

from PyQt5.QtWidgets import (
    QApplication, QWidget, QVBoxLayout, QHBoxLayout,
    QLabel, QLineEdit, QPushButton, QGroupBox,
    QListWidget, QListWidgetItem, QAbstractItemView,
    QMessageBox, QInputDialog, QProgressBar,
    QComboBox, QSlider, QDialog, QGridLayout,
    QMainWindow, QFileDialog, QSizePolicy, QSplitter, QFrame, QCheckBox,QScrollArea,
    QAction, QMenuBar, QStatusBar
)
from PyQt5.QtCore import Qt, QTimer, QThread, pyqtSignal
from PyQt5.QtGui import QFont, QColor, QBrush, QImage, QPixmap, QIcon, QDoubleValidator,QIntValidator
from PyQt5.QtCore import QMetaObject

# ========== Configuration ==========
# Path of the teach-points persistence file
TEACH_POINTS_FILE = "teach_points.json"
# Robot-arm connection settings
ROBOT_IP = "192.168.25.181"
ROBOT_PORT = 9000
CALIBRATION_FILE = "calibration_params.json"
# Audio streaming parameters
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
AUDIO_BUFFER_SIZE = 10
# YOLO model path
# NOTE(review): empty by default -- must be set before the model is loaded
YOLO_MODEL_PATH = ""
CONFIDENCE_THRESHOLD = 0.9  # detection confidence threshold
OUTPUT_DIR = "contour_data"  # directory for exported contour data
HISTORY_FRAMES = 5  # number of recent detections to accumulate
MIN_CONTOUR_POINTS = 15  # minimum point count required for smoothing
TARGET_POINTS = 100  # resampled contour point count (ensures smoothness)
SMOOTH_SIGMA = 1   # Gaussian smoothing sigma

class CameraThread(QThread):
    """Camera capture thread.

    Two modes:
      * ``network`` -- runs a TCP server and receives length-prefixed
        (big-endian uint32 header) JPEG frames from a remote client.
      * ``local``   -- reads frames from a local device through OpenCV.

    Signals:
        update_frame: emitted with each decoded BGR frame (np.ndarray).
        status_changed: emitted with human-readable status text.
    """
    update_frame = pyqtSignal(np.ndarray)
    status_changed = pyqtSignal(str)

    def __init__(self, camera_type="local", ip=None, port=None, parent=None):
        super().__init__(parent)
        self.camera_type = camera_type  # "local" or "network"
        self.ip = ip                    # bind address (network mode)
        self.port = port                # bind port (network mode)
        self.running = False
        self.cap = None                 # cv2.VideoCapture (local mode)
        self.server_socket = None       # listening socket (network mode)
        self.client_socket = None       # accepted client (network mode)

    def run(self):
        """Stream frames until stop() is called."""
        self.running = True

        if self.camera_type == "network":
            self.status_changed.emit("正在连接网络摄像头...")
            try:
                self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                # Allow quick restart of the server on the same port.
                self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self.server_socket.bind((self.ip, self.port))
                self.server_socket.listen(5)
                self.status_changed.emit(f"等待摄像头客户端连接: {self.ip}:{self.port}")

                self.client_socket, addr = self.server_socket.accept()
                self.status_changed.emit(f"摄像头客户端已连接: {addr[0]}:{addr[1]}")
                payload_size = struct.calcsize('>I')
                data = b""

                while self.running:
                    try:
                        # Read the 4-byte frame-length header.
                        while len(data) < payload_size:
                            packet = self.client_socket.recv(4096)
                            if not packet:
                                break
                            data += packet

                        if len(data) < payload_size:
                            break  # peer closed the connection

                        packed_msg_size = data[:payload_size]
                        data = data[payload_size:]
                        msg_size = struct.unpack('>I', packed_msg_size)[0]

                        # Read the full frame payload.
                        # BUGFIX: if the peer disconnects mid-frame, recv()
                        # returns b"" forever; the old code busy-looped here.
                        while len(data) < msg_size:
                            packet = self.client_socket.recv(4096)
                            if not packet:
                                raise ConnectionError("connection closed mid-frame")
                            data += packet

                        frame_data = data[:msg_size]
                        data = data[msg_size:]
                        frame_array = np.frombuffer(frame_data, dtype=np.uint8)
                        frame = cv2.imdecode(frame_array, flags=cv2.IMREAD_COLOR)

                        if frame is not None:
                            self.update_frame.emit(frame)
                    except Exception as e:
                        self.status_changed.emit(f"摄像头接收错误: {str(e)}")
                        break
            except Exception as e:
                self.status_changed.emit(f"启动网络摄像头失败: {str(e)}")
            finally:
                self.cleanup()
        else:
            # Local camera.
            self.status_changed.emit("正在打开本地摄像头...")
            try:
                # Pick the capture backend per operating system.
                # NOTE(review): device index 20 on Linux looks intentional
                # (a specific /dev/video20 node) -- confirm on target hardware.
                if sys.platform.startswith('linux'):
                    self.cap = cv2.VideoCapture(20, cv2.CAP_V4L2)
                elif sys.platform.startswith('win'):
                    self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
                else:
                    self.cap = cv2.VideoCapture(20)

                if not self.cap.isOpened():
                    self.status_changed.emit("无法打开本地摄像头")
                    return

                self.status_changed.emit("本地摄像头已启动")

                while self.running:
                    ret, frame = self.cap.read()
                    if ret:
                        self.update_frame.emit(frame)
                    else:
                        self.status_changed.emit("无法从摄像头读取帧")
                    time.sleep(0.03)  # ~30 fps pacing
            except Exception as e:
                self.status_changed.emit(f"本地摄像头错误: {str(e)}")
            finally:
                self.cleanup()

    def cleanup(self):
        """Release the capture device and close any open sockets."""
        if self.cap and self.cap.isOpened():
            self.cap.release()
        for sock in (self.client_socket, self.server_socket):
            if sock:
                try:
                    sock.close()
                except OSError:
                    pass
        self.cap = None
        self.server_socket = None
        self.client_socket = None

    def stop(self):
        """Stop the thread and block until it has exited."""
        self.running = False
        # BUGFIX: close the sockets before wait() so a thread blocked in
        # accept()/recv() is woken up instead of deadlocking wait().
        for sock in (self.client_socket, self.server_socket):
            if sock:
                try:
                    sock.close()
                except OSError:
                    pass
        self.wait()

class AudioServerThread(QThread):
    """Microphone TCP server thread.

    Listens on 0.0.0.0:12345 for a single client, buffers incoming raw
    audio chunks in a bounded queue, and re-emits each chunk through
    ``audio_data_ready``.
    """
    status_changed = pyqtSignal(str)
    audio_data_ready = pyqtSignal(bytes)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.running = False
        self.server_socket = None  # listening socket
        self.connection = None     # accepted client socket
        # Bounded buffer; the oldest chunk is dropped when full.
        self.audio_queue = queue.Queue(maxsize=AUDIO_BUFFER_SIZE)

    def run(self):
        """Accept one client and receive audio chunks until stopped."""
        try:
            self.status_changed.emit("正在启动麦克风服务器...")

            # Create the TCP listening socket.
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.bind(('0.0.0.0', 12345))
            self.server_socket.listen(1)
            self.server_socket.settimeout(5)  # 5 s timeout for accept() below

            self.status_changed.emit("等待麦克风客户端连接...")
            self.running = True
            self.status_changed.emit("麦克风服务器已启动，等待连接")

            # Accept a single connection; socket.timeout here is handled
            # by the outer except and ends the thread.
            self.connection, addr = self.server_socket.accept()
            self.status_changed.emit(f"麦克风客户端已连接: {addr[0]}:{addr[1]}")
            print("开始接收音频数据...")

        # Audio receive loop.
            while self.running:
                try:
                    # Receive one chunk of raw audio.
                    data = self.connection.recv(CHUNK)
                    if not data:
                        self.status_changed.emit("麦克风连接断开")
                        break

                    # Buffer the chunk, dropping the oldest when full.
                    if self.audio_queue.full():
                        self.audio_queue.get() 
                    self.audio_queue.put(data)

                    # Notify listeners that new audio data is available.
                    self.audio_data_ready.emit(data)

                except (ConnectionResetError, BrokenPipeError):
                    self.status_changed.emit("麦克风连接异常中断")
                    break
                except socket.timeout:
                    # NOTE(review): no timeout is set on the *accepted*
                    # socket, so this branch likely never fires -- confirm
                    # whether a per-connection timeout was intended.
                    continue
                except Exception as e:
                    self.status_changed.emit(f"音频接收错误: {str(e)}")
                    break

        except socket.timeout:
            self.status_changed.emit("麦克风连接超时")
        except Exception as e:
            self.status_changed.emit(f"启动麦克风服务器失败: {str(e)}")
        finally:
            self.cleanup()

    def get_audio_data(self):
        """Return the oldest buffered audio chunk, or None if empty."""
        try:
            return self.audio_queue.get_nowait()
        except queue.Empty:
            return None

    def get_all_audio_data(self):
        """Drain the buffer and return all chunks concatenated as bytes."""
        data = b""
        while not self.audio_queue.empty():
            data += self.audio_queue.get_nowait()
        return data

    def stop(self):
        """Stop the server and close the client connection."""
        self.running = False
        if self.connection:
            try:
                self.connection.shutdown(socket.SHUT_RDWR)
                self.connection.close()
            except:
                pass
        self.cleanup()

    def cleanup(self):
        """Close the listening socket and drain the audio buffer."""
        if self.server_socket:
            try:
                self.server_socket.close()
            except:
                pass

        # Drain any remaining buffered audio.
        while not self.audio_queue.empty():
            self.audio_queue.get_nowait()

        self.connection = None
        self.server_socket = None

class FixedContourManager:
    """Fuses per-frame contours of one tracked target into a single,
    smoothed "fixed" contour.

    Collects up to ``max_frames`` raw contours of the same class id,
    resamples them to a common point count, takes the per-point median
    across frames, and applies Savitzky-Golay + Gaussian smoothing.
    """

    def __init__(self, max_frames=HISTORY_FRAMES):
        """Create an empty manager that fuses ``max_frames`` contours."""
        self.max_frames = max_frames
        self.raw_contours = deque(maxlen=max_frames)  # raw per-frame contour dicts
        self.smoothed_contour = None  # final fused contour (list of [x, y])
        self.smoothed_size = None     # image size the fused contour refers to
        self.completed = False        # True once the fixed contour is built
        self.tracking_id = None       # class id of the tracked target

    def add_contour(self, contour_data):
        """Add one frame's contour dict.

        Ignored once completed, for None/empty data, or for contours whose
        class id differs from the first one seen.  Triggers fusion when
        ``max_frames`` contours have been collected.
        """
        if self.completed or contour_data is None or not contour_data.get('contour_points'):
            return

        # Lock onto the class id of the first contour we see.
        if not self.raw_contours:
            self.tracking_id = contour_data.get('class_id', -1)
            print(f"开始跟踪目标ID: {self.tracking_id}")

        # Only accept contours belonging to the tracked target.
        if contour_data.get('class_id', -1) == self.tracking_id:
            self.raw_contours.append(contour_data)
            print(f"添加轮廓数据 | 当前数量: {len(self.raw_contours)}/{self.max_frames}")

            if len(self.raw_contours) >= self.max_frames:
                print("收集完成，开始计算固定轮廓")
                self.completed = True
                self._calculate_fixed_contour()

    def get_fixed_contour(self):
        """Return (contour point list, image size) -- both None until done."""
        return self.smoothed_contour, self.smoothed_size

    def _calculate_fixed_contour(self):
        """Fuse the collected contours into the final fixed contour."""
        if not self.raw_contours:
            print("无轮廓数据可用于计算固定轮廓")
            return

        print(f"开始计算固定轮廓 | 收集帧数: {len(self.raw_contours)}")

        # Use the most recent contour's point count as the reference.
        base_points = len(self.raw_contours[-1]['contour_points'])

        # Too few points to smooth: fall back to the latest contour as-is.
        if base_points < MIN_CONTOUR_POINTS:
            print("点数太少，直接使用最后一个轮廓")
            self.smoothed_contour = self.raw_contours[-1]['contour_points']
            self.smoothed_size = self.raw_contours[-1]['image_size']
            self.completed = True
            return

        # Resample every contour to the same point count.
        aligned_contours = []
        for contour in self.raw_contours:
            current_points = np.array(contour['contour_points'])
            aligned = self._align_point_count(current_points, TARGET_POINTS)
            aligned_contours.append(aligned)

        # Per-point median across frames rejects outlier detections.
        stacked = np.stack(aligned_contours)
        fixed_contour = np.median(stacked, axis=0)

        # Final smoothing pass.
        smoothed_contour = self._apply_advanced_smoothing(fixed_contour)

        self.smoothed_contour = smoothed_contour.tolist()
        self.smoothed_size = self.raw_contours[-1]['image_size']
        self.completed = True

        print(f"固定轮廓生成完成 | 点数: {len(self.smoothed_contour)}")

    def _align_point_count(self, points, target_count):
        """Resample a contour to ``target_count`` points along its length."""
        if len(points) == target_count:
            return points

        # Perimeter including the closing segment back to the first point.
        contour_length = np.sum(np.linalg.norm(np.diff(points, axis=0, append=points[0:1]), axis=1))

        # Linear parameterization over the perimeter.
        t_current = np.linspace(0, contour_length, len(points))
        t_target = np.linspace(0, contour_length, target_count)

        # Cubic-spline interpolation keeps the resampled curve smooth.
        if len(points) > 3:
            cs_x = CubicSpline(t_current, points[:, 0])
            cs_y = CubicSpline(t_current, points[:, 1])
            x_interp = cs_x(t_target)
            y_interp = cs_y(t_target)
        else:
            # Too few points for a spline: linear interpolation.
            x_interp = np.interp(t_target, t_current, points[:, 0])
            y_interp = np.interp(t_target, t_current, points[:, 1])

        return np.column_stack((x_interp, y_interp))

    def _apply_advanced_smoothing(self, contour):
        """Smooth a closed contour while preserving its point count."""
        # Savitzky-Golay filter removes high-frequency jitter.
        if len(contour) > 7:
            window_size = min(7, len(contour) // 2 * 2 + 1)  # force odd window
            x_smooth = savgol_filter(contour[:, 0], window_size, 3)
            y_smooth = savgol_filter(contour[:, 1], window_size, 3)
            contour = np.column_stack((x_smooth, y_smooth))

        if len(contour) > 10:
            # BUGFIX: pad with wrapped points on *both* ends so the Gaussian
            # filter treats the contour as closed, and slice back to exactly
            # the original point count.  The previous code padded only the
            # tail and returned N+1 points for an N-point input.
            pad = 5
            periodic = np.vstack((contour[-pad:], contour, contour[:pad]))
            x_periodic = ndi.gaussian_filter1d(periodic[:, 0], sigma=SMOOTH_SIGMA)
            y_periodic = ndi.gaussian_filter1d(periodic[:, 1], sigma=SMOOTH_SIGMA)
            return np.column_stack((x_periodic[pad:-pad], y_periodic[pad:-pad]))

        # Short contours: plain (non-periodic) Gaussian smoothing.
        x_smooth = ndi.gaussian_filter1d(contour[:, 0], sigma=SMOOTH_SIGMA)
        y_smooth = ndi.gaussian_filter1d(contour[:, 1], sigma=SMOOTH_SIGMA)
        return np.column_stack((x_smooth, y_smooth))

    def reset(self):
        """Clear all collected data and return to the initial state."""
        self.raw_contours.clear()
        self.completed = False
        self.tracking_id = None
        self.smoothed_contour = None
        self.smoothed_size = None
        print(" 轮廓管理器已重置")

class CameraDetectionSystem:
    """摄像头接收、目标检测、坐标转换的系统"""
    def __init__(self, model_path, server_ip = '0.0.0.0', server_port = 9999):
        """Initialize detection/calibration state.

        Call load_model() and start_receiving() afterwards to begin
        processing frames.
        """
        self.model_path = model_path          # path to the ONNX model file
        self.server_ip = server_ip            # listen address (default: all interfaces)
        self.server_port = server_port        # listen port for the video server
        self.input_size = (640, 640)          # model input size (width, height)
        self.conf_thres = 0.25                # detection confidence threshold
        self.nms_thres = 0.45                 # non-max-suppression IoU threshold
        self.calibration_complete = False     # camera calibration finished?
        self.c_x = 0                          # calibration centre X (pixels)
        self.c_y = 0                          # calibration centre Y (pixels)
        self.ratio = 0                        # pixel-to-world scale (mm / pixel)
        self.camera_offset_x = 165             # camera X offset from robot base (mm)
        self.camera_offset_y = 5              # camera Y offset from robot base (mm)
        self.calibration_frames = []           # frames collected during calibration
        self.calibration_in_progress = False  # calibration currently running?
        self.frame_queue = Queue(maxsize = 10) # received video frames
        self.stop_event = threading.Event()    # signals the receive thread to stop
        self.receive_thread = None            # video receive thread
        self.session = None                   # ONNX runtime session
        self.input_name = None                # model input node name
        self.output_name = None               # model output node name
        self.output_dir = "contour_data"       # contour data output directory
        self.frame_counter = 0                # processed-frame counter
        self.fixed_contour = None             # fixed contour data, once built
        self.save_as_fixed = False            # save next contour as fixed?
        self.init_aruco()                     # set up the ArUco detector
        self.num_mask = 32                    # mask prototype channels (instance seg.)
        # Actual class count is detected in load_model(); 0 means "unknown".
        # (A redundant earlier assignment of 80 was removed -- it was dead,
        # always overwritten by this line before anything read it.)
        self.num_classes = 0

    def preprocess(self, image):
        """Convert a BGR frame into a normalized NCHW float32 tensor.

        Returns (tensor of shape (1, 3, 640, 640), (orig_w, orig_h)).
        NOTE(review): the resize is hard-coded to 640x640 even though
        load_model() records self.input_size -- confirm for non-640 models.
        """
        height, width = image.shape[:2]
        rgb = cv2.cvtColor(cv2.resize(image, (640, 640)), cv2.COLOR_BGR2RGB)
        normalized = rgb.astype(np.float32) / 255.0
        chw = np.transpose(normalized, (2, 0, 1))
        return chw[np.newaxis, ...], (width, height)

    def infer(self, image):
        """Run the ONNX model on one frame, keeping only the single
        highest-confidence detection.

        Returns (boxes, masks, mask_protos, (orig_w, orig_h)); ``boxes``
        holds at most one dict with 'class_id', 'confidence', 'bbox'.
        """
        input_tensor, orig_size = self.preprocess(image)
        outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
        det_output, mask_output = outputs
        predictions = np.squeeze(det_output).T
        mask_protos = mask_output[0]

        boxes, masks = [], []
        orig_w, orig_h = orig_size

        # Start the search at the global threshold so weaker hits are skipped.
        best_confidence = CONFIDENCE_THRESHOLD  
        best_pred = None
        
        # Scan all predictions for the single best-scoring target.
        for pred in predictions:
            if self.num_classes > 0:
                # NOTE(review): slicing class scores at pred[5:] assumes an
                # objectness column at pred[4]; YOLOv8-style seg exports have
                # no objectness (scores start at pred[4]) -- verify against
                # the actual model layout (see also load_model's "- 5").
                scores = pred[5:5 + self.num_classes]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
            else:
                confidence = pred[4]
                class_id = 0

            # Keep the best candidate.  (The 0.5 check is redundant since
            # best_confidence starts at CONFIDENCE_THRESHOLD = 0.9.)
            if confidence > 0.5 and confidence > best_confidence:
                best_confidence = confidence
                best_pred = pred

        # Package the best target, if any was found.
        if best_pred is not None:
            if self.num_classes > 0:
                scores = best_pred[5:5 + self.num_classes]
                class_id = int(np.argmax(scores))
            else:
                class_id = 0
                
            # Map the box from 640x640 model space back to the original image.
            x, y, w, h = best_pred[0:4]
            x *= orig_w / 640
            y *= orig_h / 640
            w *= orig_w / 640
            h *= orig_h / 640
            mask_coeffs = best_pred[-self.num_mask:]
            
            boxes.append({
                'class_id': class_id,
                'confidence': float(best_confidence),
                'bbox': [float(x), float(y), float(w), float(h)]
            })
            masks.append(mask_coeffs)

        return boxes, masks, mask_protos, (orig_w, orig_h)

    def extract_contour_data(image, boxes, masks, protos, img_size):
        """Extract the main object contour from the segmentation output.

        Returns (contour_data dict, overlay image) or (None, image) when no
        usable contour is found.

        NOTE(review): defined inside the class but takes no ``self`` -- it
        only works when called unbound (ClassName.extract_contour_data(...)).
        Calling it as ``self.extract_contour_data(img, ...)`` would shift
        every argument by one; confirm how callers invoke it.
        """
        if not boxes:
            return None, image
        
        # Unpack the detection box (centre x/y, width, height) into corners.
        x, y, w, h = boxes[0]['bbox']
        bbox_x1 = int(x - w/2)
        bbox_y1 = int(y - h/2)
        bbox_x2 = int(x + w/2)
        bbox_y2 = int(y + h/2)
        bbox_area = w * h
        
        # Decode the mask: coefficients x prototypes, then sigmoid.
        # NOTE(review): 32 is hard-coded here (num_mask is an instance
        # attribute and there is no ``self`` to read it from).
        masks_np = np.stack(masks, axis=0)
        protos_flat = protos.reshape(32, -1)
        mask_output = masks_np @ protos_flat
        mask_output = 1 / (1 + np.exp(-mask_output))
        mask_output = mask_output.reshape(-1, protos.shape[1], protos.shape[2])
        m = mask_output[0]
        m = cv2.resize(m, img_size, interpolation=cv2.INTER_LINEAR)
        
        # Binarize the mask and remove speckle noise with a morphological open.
        _, binary_mask = cv2.threshold(m, 0.5, 255, cv2.THRESH_BINARY)
        binary_mask = binary_mask.astype(np.uint8)
        kernel = np.ones((3, 3), np.uint8)
        binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_OPEN, kernel)
        
        # Find all external contours of the mask.
        contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            return None, image
        
        # Keep contours that overlap the detection box and have a sane area.
        valid_contours = []
        for contour in contours:
            contour_x, contour_y, contour_w, contour_h = cv2.boundingRect(contour)
            contour_x2 = contour_x + contour_w
            contour_y2 = contour_y + contour_h
            
            overlap_x1 = max(bbox_x1, contour_x)
            overlap_y1 = max(bbox_y1, contour_y)
            overlap_x2 = min(bbox_x2, contour_x2)
            overlap_y2 = min(bbox_y2, contour_y2)
            
            overlap_width = max(0, overlap_x2 - overlap_x1)
            overlap_height = max(0, overlap_y2 - overlap_y1)
            overlap_area = overlap_width * overlap_height
            
            contour_area = cv2.contourArea(contour)
            
            # Require >50% box overlap and an area between 10% and 200% of the box.
            if overlap_area > 0.5 * bbox_area and 0.1 * bbox_area < contour_area < 2.0 * bbox_area:
                valid_contours.append(contour)
        
        if not valid_contours:
            return None, image
        
        # The largest valid contour is taken as the main object outline.
        main_contour = max(valid_contours, key=cv2.contourArea)
        contour_points = []
        
        # Adaptive contour simplification: simplify flat stretches harder,
        # keep original points at corners.
        if main_contour is not None and len(main_contour) > 4:
            # Perimeter drives the approxPolyDP tolerance.
            contour_length = cv2.arcLength(main_contour, True)
            base_epsilon = 0.005 * contour_length
            
            # Per-point turning angle as a curvature proxy.
            curvature_scores = []
            for i in range(1, len(main_contour)-1):
                p1 = main_contour[i-1][0]
                p2 = main_contour[i][0]
                p3 = main_contour[i+1][0]
                
                # Vectors from the middle point to its neighbours.
                vec1 = (p1[0]-p2[0], p1[1]-p2[1])
                vec2 = (p3[0]-p2[0], p3[1]-p2[1])
                
                # Angle difference, folded into [0, 180].
                angle1 = np.arctan2(vec1[1], vec1[0])
                angle2 = np.arctan2(vec2[1], vec2[0])
                angle_diff = np.abs(np.degrees(angle1 - angle2))
                angle_diff = min(angle_diff, 360 - angle_diff)
                curvature_scores.append(angle_diff)
            
            # Point-level adaptive simplification.
            for i in range(len(main_contour)):
                if i == 0 or i == len(main_contour)-1:
                    # Always keep the first and last points.
                    contour_points.append(main_contour[i][0].tolist())
                    continue
                    
                # Local curvature: max of the two adjacent turning angles.
                prev_score = curvature_scores[i-1] if i >= 1 else 0
                curr_score = curvature_scores[i] if i < len(curvature_scores) else 0
                max_curvature = max(prev_score, curr_score)
                
                # Flat regions get stronger simplification.
                epsilon = base_epsilon
                if max_curvature < 60:  # shallow angle => flat stretch
                    # Build a 3-point local segment around i.
                    start_idx = max(0, i-1)
                    end_idx = min(len(main_contour), i+2)
                    segment = main_contour[start_idx:end_idx]
                    
                    # Simplify the local segment.
                    if len(segment) > 2:
                        segment = segment.reshape(-1, 1, 2)
                        simplified = cv2.approxPolyDP(segment, epsilon, False)
                        
                        # Keep only the middle point.
                        # NOTE(review): when approxPolyDP returns fewer than
                        # 3 points the point is silently dropped -- confirm
                        # this thinning is intentional.
                        if len(simplified) == 3:
                            contour_points.append(simplified[1][0].tolist())
                    else:
                        contour_points.append(main_contour[i][0].tolist())
                else:
                    # Keep original points at corners.
                    contour_points.append(main_contour[i][0].tolist())
        else:
            # Very short contour: use it as-is.
            contour_points = main_contour.squeeze().tolist()
        
        # Normalize a squeezed single point back into a list of points.
        if len(contour_points) > 0 and not isinstance(contour_points[0], list):
            contour_points = [contour_points]
        
        # Post-process: suppress jumps larger than 5% of the image size.
        if len(contour_points) > 2:
            filtered_points = []
            max_distance = min(img_size) * 0.05  # max allowed step (5% of image size)
            
            for i in range(len(contour_points)):
                current_point = contour_points[i]
                
                if len(filtered_points) > 0:
                    prev_point = filtered_points[-1]
                    distance = np.sqrt((current_point[0]-prev_point[0])**2 + 
                                    (current_point[1]-prev_point[1])**2)
                    
                    if distance < max_distance:
                        filtered_points.append(current_point)
                    else:
                        # Insert the midpoint as a transition before the jump.
                        mid_point = [
                            (prev_point[0] + current_point[0]) / 2,
                            (prev_point[1] + current_point[1]) / 2
                        ]
                        filtered_points.append(mid_point)
                        filtered_points.append(current_point)
                else:
                    filtered_points.append(current_point)
            
            contour_points = filtered_points
        
        # Assemble the result record.
        contour_data = {
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            "confidence": boxes[0]['confidence'],
            "class_id": boxes[0]['class_id'],
            "bbox": boxes[0]['bbox'],
            "contour_points": contour_points,
            "image_size": img_size
        }
        
        # Contour points in OpenCV's (N, 1, 2) drawing layout.
        contour_array = np.array(contour_points, dtype=np.int32).reshape((-1, 1, 2))
        
        # Draw the detection box and the contour on a copy of the frame.
        overlay = image.copy()
        cv2.rectangle(overlay, (bbox_x1, bbox_y1), (bbox_x2, bbox_y2), (0, 255, 0), 2)
        cv2.drawContours(overlay, [contour_array], -1, (0, 0, 255), 3)
        
        if len(contour_points) > 0:
            start_point = tuple(map(int, contour_points[0]))
            cv2.circle(overlay, start_point, 8, (255, 0, 0), -1)
            cv2.putText(overlay, "Start", (start_point[0]+10, start_point[1]), 
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
        
        cv2.putText(overlay, f"Conf: {boxes[0]['confidence']:.2f}", 
                (bbox_x1, bbox_y1 - 10), 
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        
        # Show how many points the final contour has.
        cv2.putText(overlay, f"Points: {len(contour_points)}", 
                (10, image.shape[0] - 20), 
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        
        return contour_data, overlay
        
    def draw_fixed_contour(image, fixed_contour, original_size):
        """Render the fused "fixed" contour on top of a frame.

        NOTE(review): takes no ``self`` (callable unbound only);
        ``original_size`` is accepted but unused -- kept for signature
        compatibility.
        """
        canvas = image.copy()

        if not fixed_contour:
            return canvas

        # Contour points in OpenCV's (N, 1, 2) int32 drawing layout.
        pts = np.array(fixed_contour, dtype=np.int32).reshape((-1, 1, 2))

        # The fixed contour itself.
        cv2.drawContours(canvas, [pts], -1, (0, 255, 255), 3)

        # Mark where the contour starts.
        if len(pts) > 0:
            start = tuple(pts[0][0])
            cv2.circle(canvas, start, 8, (255, 100, 0), -1)
            cv2.putText(canvas, "Fixed", (start[0]+10, start[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 100, 0), 2)

        # Banner label identifying the overlay.
        cv2.putText(canvas, "Fixed Contour", (10, 90),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

        return canvas

    def save_contour_data(contour_data, frame_count, is_fixed=False):
        """Persist one contour dict as JSON under OUTPUT_DIR.

        NOTE(review): defined without ``self`` -- only callable unbound;
        confirm call sites.
        """
        if contour_data is None:
            return
        
        prefix = "fixed_" if is_fixed else ""
        filename = os.path.join(OUTPUT_DIR, f"{prefix}contour_{frame_count}.json")
        # Make sure the output directory exists before writing.
        os.makedirs(OUTPUT_DIR, exist_ok=True)
        with open(filename, 'w') as f:
            json.dump(contour_data, f, indent=2)
        
        # BUGFIX: the log line printed a literal placeholder instead of the
        # actual file path.
        print(f"💾 轮廓数据已保存到: {filename}")

    def draw_masks_on_image(self, image, boxes, mask_coeffs, mask_protos, img_size):
        """Alpha-blend per-instance masks (random colors) onto the image."""
        # Decode each coefficient vector against the prototypes into a
        # binary 0/255 mask at the requested image size.
        decoded = []
        for coeff in mask_coeffs:
            logits = np.tensordot(coeff.astype(np.float32), mask_protos.astype(np.float32), axes=([0], [0]))
            prob = 1 / (1 + np.exp(-logits))
            prob = cv2.resize(prob, img_size)
            decoded.append((prob > 0.5).astype(np.uint8) * 255)

        # Blend each mask in with 50% opacity and a random color.
        for mask in decoded:
            color = np.random.randint(0, 255, (3,), dtype=np.uint8)
            tint = np.zeros_like(image)
            for channel in range(3):
                tint[:, :, channel] = mask * color[channel] // 255
            image = cv2.addWeighted(image, 1.0, tint, 0.5, 0)
        return image

    def init_aruco(self):
        """Initialize the ArUco dictionary and detector parameters.

        Uses the legacy OpenCV ArUco API (the environment this project
        currently targets) and falls back to the modern API (OpenCV >= 4.7,
        where the *_get/*_create helpers were removed), so the code runs on
        either OpenCV generation instead of raising AttributeError.
        """
        try:
            # Legacy API (OpenCV < 4.7).
            self.aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
            self.aruco_params = cv2.aruco.DetectorParameters_create()
        except AttributeError:
            # Modern API (OpenCV >= 4.7).
            self.aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
            self.aruco_params = cv2.aruco.DetectorParameters()

    def load_model(self):
        """Load the ONNX model; auto-detect input size and class count.

        Returns True on success, False on failure.
        """
        try:
            print(f"[INFO] 加载模型: {self.model_path}")
            self.session = ort.InferenceSession(
                self.model_path,
                providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
            )
            print("Providers:", self.session.get_providers())
            self.input_name = self.session.get_inputs()[0].name
            self.output_names = [o.name for o in self.session.get_outputs()]
            print(f"[INFO] 模型加载成功! 输入名: {self.input_name}, 输出名: {self.output_names}")

            # Read the model's expected input size from its input shape.
            input_info = self.session.get_inputs()[0]
            input_shape = input_info.shape
            if len(input_shape) == 4:  # [batch, channel, height, width]
                height = input_shape[2]
                width = input_shape[3]
                self.input_size = (width, height)
                print(f"[INFO] 模型输入尺寸为: {self.input_size}")
            elif len(input_shape) == 3:  # [batch, height, width]
                height = input_shape[1]
                width = input_shape[2]
                self.input_size = (width, height)
                print(f"[INFO] 模型输入尺寸为: {self.input_size}")
            else:
                print(f"[WARNING] 无法识别的输入形状: {input_shape}, 使用默认尺寸")
                self.input_size = (640, 640)

            # Derive the class count from the detection-output channel size.
            # Assumed layout per prediction: (x, y, w, h, obj_conf) +
            # num_classes + num_mask.
            # NOTE(review): YOLOv8-seg exports have NO objectness column
            # (4 + num_classes + num_mask), which would make this "- 5" off
            # by one (and infer's pred[5:] slicing likewise) -- verify
            # against the actual model before relying on class ids.
            output_info = self.session.get_outputs()[0]
            output_shape = output_info.shape
            if len(output_shape) >= 2:
                self.num_classes = output_shape[1] - 5 - self.num_mask
                print(f"[INFO] 检测到 {self.num_classes} 个类别")
            else:
                self.num_classes = 80
                print("[WARNING] 无法确定类别数量，使用默认值80")

            return True
        except Exception as e:
            print(f"[ERROR] 加载模型失败: {str(e)}")
            return False

    def start_receiving(self):
        """Launch the background video-receive thread (idempotent)."""
        # Already running? Nothing to do.
        already_running = self.receive_thread is not None and self.receive_thread.is_alive()
        if already_running:
            return True

        worker = threading.Thread(target=self.receive_video_thread, daemon=True)
        self.receive_thread = worker
        worker.start()
        print("[SERVER] 接收线程已启动")
        return True

    def save_calibration_params(self, file_path=CALIBRATION_FILE):
        """Write the current calibration (c_x, c_y, ratio) to a JSON file.

        Returns True on success, False on any error.
        """
        try:
            payload = {
                'c_x': self.c_x,
                'c_y': self.c_y,
                'ratio': self.ratio,
                'timestamp': time.strftime("%Y-%m-%d %H:%M:%S"),
            }
            with open(file_path, 'w') as fh:
                json.dump(payload, fh, indent=4)
            print(f"[CALIBRATION] 标定参数已保存到: {file_path}")
            return True
        except Exception as e:
            print(f"[ERROR] 保存标定参数失败: {str(e)}")
            return False

    def load_calibration_params(self, file_path=CALIBRATION_FILE):
        """Restore calibration parameters (c_x, c_y, ratio) from a JSON file.

        Marks calibration as complete on success.

        Args:
            file_path: source file (defaults to CALIBRATION_FILE).

        Returns:
            bool: True when the file was found and parsed, False otherwise.
        """
        try:
            if not os.path.exists(file_path):
                print("[CALIBRATION] 未找到标定文件")
                return False
            with open(file_path, 'r') as fh:
                stored = json.load(fh)
            # Missing fields fall back to 0.
            self.c_x = stored.get('c_x', 0)
            self.c_y = stored.get('c_y', 0)
            self.ratio = stored.get('ratio', 0)
            self.calibration_complete = True
            print(f"[CALIBRATION] 标定参数已加载: c_x={self.c_x}, c_y={self.c_y}, ratio={self.ratio}")
            return True
        except Exception as exc:
            print(f"[ERROR] 加载标定参数失败: {str(exc)}")
            return False

    def receive_video_thread(self):
        """Accept one TCP client and stream length-prefixed frames into self.frame_queue.

        Wire format: a 4-byte big-endian unsigned length ('>I') followed by
        that many bytes of an encoded image. Runs until self.stop_event is
        set or the connection drops; decoded frames are pushed to
        self.frame_queue, and frames are dropped when the queue is full.
        """
        try:
            server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server_socket.bind((self.server_ip, self.server_port))
            server_socket.listen(5)
            print(f"[SERVER] 服务端已启动，正在监听 {self.server_ip}:{self.server_port}")

            # NOTE(review): accept() blocks indefinitely and ignores stop_event
            # until a client connects — confirm this is acceptable for shutdown.
            client_socket, client_addr = server_socket.accept()
            print(f"[SERVER] 已接受来自 {client_addr} 的连接")
            payload_size = struct.calcsize('>I')  # 4-byte length prefix

            data = b""  # rolling receive buffer; may hold part of the next frame
            while not self.stop_event.is_set():
                try:
                    # Read until the 4-byte frame-length header is complete.
                    while len(data) < payload_size:
                        packet = client_socket.recv(4096)
                        if not packet:
                            print("[SERVER] 客户端断开连接")
                            break
                        data += packet

                    # The break above only exits the header loop; re-check here
                    # to leave the outer loop on disconnect.
                    if len(data) < payload_size:
                        print("[SERVER] 客户端断开连接")
                        break

                    packed_msg_size = data[:payload_size]
                    data = data[payload_size:]
                    msg_size = struct.unpack('>I', packed_msg_size)[0]

                    # Read until the full frame body has arrived.
                    while len(data) < msg_size:
                        to_receive = min(4096, msg_size - len(data))
                        packet = client_socket.recv(to_receive)
                        if not packet:
                            print("[SERVER] 客户端断开连接")
                            break
                        data += packet

                    if len(data) < msg_size:
                        print("[SERVER] 连接中断，未能接收完整帧")
                        break

                    # Split off this frame; keep any surplus bytes for the next one.
                    frame_data = data[:msg_size]
                    data = data[msg_size:]
                    frame_array = np.frombuffer(frame_data, dtype=np.uint8)
                    frame = cv2.imdecode(frame_array, flags=cv2.IMREAD_COLOR)

                    if frame is None:
                        print("[SERVER] 解码帧失败")
                        continue

                    # Hand a defensive copy to the consumer thread via the queue.
                    try:
                        self.frame_queue.put(frame.copy(), block=True, timeout=0.5)
                    except queue.Full:
                        # Queue full: drop this frame rather than stall the stream.
                        print("[SERVER] 帧队列已满，跳过帧")
                except socket.timeout:
                    print("[SERVER] 套接字超时")
                    continue
                except socket.error as e:
                    if not self.stop_event.is_set():
                        print(f"[SERVER] 套接字错误: {str(e)}")
                    break
                except Exception as e:
                    print(f"[SERVER] 接收错误: {str(e)}")
                    import traceback
                    traceback.print_exc()
                    break

        except Exception as e:
            print(f"[SERVER] 接收线程发生错误: {str(e)}")
            import traceback
            traceback.print_exc()
        finally:
            print("[SERVER] 清理接收线程资源")
            # client_socket only exists if accept() returned.
            if 'client_socket' in locals():
                client_socket.close()
            # NOTE(review): if socket() itself raised, server_socket is unbound
            # here and this line would raise NameError — rare but possible.
            server_socket.close()
            print("[SERVER] 接收线程已停止")

    def detect_aruco_markers(self, img):
        """Detect the two calibration ArUco markers in *img* and annotate it in place.

        Only runs while calibration is pending or in progress. Supports both the
        OpenCV >= 4.7 object-oriented ArUco API and the legacy module-level API.

        Args:
            img: BGR image (modified in place with marker annotations).

        Returns:
            tuple | None: (x1, x2, y1, y2) — note the order — holding the centre
            coordinates of the two lowest-ID markers, or None when calibration
            is inactive or fewer than two markers are visible.
        """
        # Skip entirely once calibration is done and not being redone.
        if self.calibration_complete and not self.calibration_in_progress:
            return None

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        try:
            # New-style (OpenCV >= 4.7) detector object.
            detector = cv2.aruco.ArucoDetector(self.aruco_dict, self.aruco_params)
            corners, ids, _ = detector.detectMarkers(gray)
        except (AttributeError, TypeError):
            # Legacy module-level API for older OpenCV builds.
            corners, ids, _ = cv2.aruco.detectMarkers(gray, self.aruco_dict, parameters=self.aruco_params)

        if ids is None or len(ids) < 2:
            return None

        # Marker centres as (id, x, y), sorted by ID for deterministic pairing.
        marker_positions = []
        for i, corner in enumerate(corners):
            pts = corner[0]
            x = int(np.mean(pts[:, 0]))
            y = int(np.mean(pts[:, 1]))
            marker_positions.append((ids[i][0], x, y))
        marker_positions.sort(key=lambda item: item[0])

        if len(marker_positions) < 2:
            return None

        _, x1, y1 = marker_positions[0]
        _, x2, y2 = marker_positions[1]

        # drawDetectedMarkers has the same signature in old and new APIs, so no
        # version fallback is needed (the original try/except had identical
        # calls in both branches).
        cv2.aruco.drawDetectedMarkers(img, corners, ids)

        # Highlight both marker centres.
        cv2.circle(img, (x1, y1), 10, (0, 255, 0), 2)
        cv2.circle(img, (x2, y2), 10, (0, 255, 0), 2)
        cv2.putText(img, f"Marker 1", (x1 - 40, y1 - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        cv2.putText(img, f"Marker 2", (x2 - 40, y2 - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        return x1, x2, y1, y2

    def pixel_to_world_coords(self, pixel_x, pixel_y):
        """Map a pixel coordinate to world-frame millimetres.

        Note the axis swap: image Y drives world X and image X drives world Y
        (presumably the camera is mounted rotated relative to the robot frame
        — verify against the physical setup).

        Returns:
            (world_x, world_y, ok): ok is False (with zeros) when calibration
            has not completed.
        """
        if not self.calibration_complete:
            return 0, 0, False

        # Offsets from the calibrated image centre, scaled to mm (ratio = mm/pixel).
        dx = (pixel_y - self.c_y) * self.ratio
        dy = (pixel_x - self.c_x) * self.ratio

        return dx + self.camera_offset_x, dy + self.camera_offset_y, True

    def non_max_suppression(self, boxes, scores, iou_threshold):
        """Greedy non-maximum suppression over corner-format boxes.

        Args:
            boxes: (N, 4) array of [x1, y1, x2, y2] corners.
            scores: (N,) confidence scores.
            iou_threshold: boxes with IoU above this against a kept box are dropped.

        Returns:
            list: indices of the kept boxes, highest score first.
        """
        if len(boxes) == 0:
            return []

        left, top = boxes[:, 0], boxes[:, 1]
        right, bottom = boxes[:, 2], boxes[:, 3]

        # +1 uses inclusive pixel-coordinate area, matching the overlap maths below.
        box_areas = (right - left + 1) * (bottom - top + 1)

        # Candidate order: best score first.
        order = np.argsort(scores)[::-1]
        selected = []

        while len(order) > 0:
            best = order[0]
            selected.append(best)
            rest = order[1:]

            # Intersection of the best box with every remaining candidate.
            inter_w = np.maximum(0.0, np.minimum(right[best], right[rest]) - np.maximum(left[best], left[rest]) + 1)
            inter_h = np.maximum(0.0, np.minimum(bottom[best], bottom[rest]) - np.maximum(top[best], top[rest]) + 1)
            intersection = inter_w * inter_h

            overlap = intersection / (box_areas[best] + box_areas[rest] - intersection)

            # Keep only candidates that do not overlap the chosen box too much.
            order = rest[overlap <= iou_threshold]

        return selected

    def draw_fixed_contour(self, image, fixed_contour, original_size):
        """Draw the stored fixed contour on a copy of *image*.

        Args:
            image: BGR frame to annotate (not modified; a copy is returned).
            fixed_contour: list of [x, y] pixel points, or falsy for no-op.
            original_size: (w, h) of the source image; accepted for interface
                symmetry with the live-contour path but not used here (the
                original unpacked it into unused locals).

        Returns:
            np.ndarray: annotated copy of *image*.
        """
        overlay = image.copy()

        if not fixed_contour:
            return overlay

        # OpenCV drawing shape (N, 1, 2).
        points_array = np.array(fixed_contour, dtype=np.int32).reshape((-1, 1, 2))

        # Cyan outline marks the fixed contour.
        cv2.drawContours(overlay, [points_array], -1, (0, 255, 255), 3)

        # Mark and label the contour start point.
        if len(points_array) > 0:
            start_point = tuple(points_array[0][0])
            cv2.circle(overlay, start_point, 8, (255, 100, 0), -1)
            cv2.putText(overlay, "Fixed", (start_point[0]+10, start_point[1]), 
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 100, 0), 2)

        # Banner indicating a fixed contour is being shown.
        cv2.putText(overlay, "Fixed Contour", (10, 90), 
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

        return overlay

    def save_contour_data(self, contour_data, frame_count, is_fixed=False):
        """Save contour data as JSON under self.output_dir.

        Args:
            contour_data: dict payload to serialise; None is a silent no-op.
            frame_count: frame counter used in the output filename.
            is_fixed: when True, prefix the filename with "fixed_".
        """
        if contour_data is None:
            return

        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(self.output_dir, exist_ok=True)

        prefix = "fixed_" if is_fixed else ""
        filename = os.path.join(self.output_dir, f"{prefix}contour_{frame_count}.json")
        with open(filename, 'w') as f:
            json.dump(contour_data, f, indent=2)

        # BUG FIX: the original printed the literal "(unknown)" instead of the path.
        print(f"💾 轮廓数据已保存到: {filename}")

    def detect_objects(self, frame):
        """Run detection on *frame*, annotate it, and map centres to world coords.

        Requires a loaded model session and completed calibration; otherwise
        the frame is returned untouched with empty lists. Only the first box
        returned by self.infer is processed.

        Returns:
            (annotated_frame, detections, world_coords): detections is a list
            of dicts (center/world_coords/bbox/label/score/contour);
            world_coords is a list of (world_x, world_y) tuples.
        """
        if not self.session or not self.calibration_complete:
            return frame, [], []

        # Frame counter is used to name saved contour files.
        self.frame_counter += 1

        try:
            # Inference: boxes + mask coefficients + prototype masks.
            boxes, mask_coeffs, mask_protos, img_size = self.infer(frame)
            orig_w, orig_h = img_size
            detections = []
            world_coords = []

            # Contour handling (first/best box only).
            contour_data = None
            if boxes:
                box = boxes[0]
                mask = mask_coeffs[0] if mask_coeffs else None

                # Bounding-box corners from centre/size, clamped to the image.
                x, y, w, h = box['bbox']
                x1 = int(max(0, min(x - w / 2, orig_w - 1)))
                y1 = int(max(0, min(y - h / 2, orig_h - 1)))
                x2 = int(max(0, min(x + w / 2, orig_w - 1)))
                y2 = int(max(0, min(y + h / 2, orig_h - 1)))

                # Box centre in pixels.
                center_x = (x1 + x2) // 2
                center_y = (y1 + y2) // 2

                # Convert to world coordinates.
                world_x, world_y, success = self.pixel_to_world_coords(center_x, center_y)

                # Mask/contour processing when segmentation data is available.
                if mask is not None and mask_protos is not None:
                    contour_data, frame = self.process_contour(
                        frame, box, [mask], mask_protos, (orig_w, orig_h)
                    )

                    # Prefer the contour centroid over the bbox centre.
                    if contour_data and contour_data.get("contour_points"):
                        contour_points = np.array(contour_data["contour_points"])
                        contour_center = np.mean(contour_points, axis=0).astype(int)
                        center_x, center_y = contour_center

                        # Recompute world coordinates from the contour centroid.
                        world_x, world_y, success = self.pixel_to_world_coords(center_x, center_y)

                        # Optionally capture this contour as the "fixed" contour.
                        if self.save_as_fixed:
                            self.fixed_contour = contour_points.tolist()
                            self.save_as_fixed = False  # one-shot flag
                else:
                    # No mask data: fall back to drawing the bounding box.
                    color = (0, 255, 0)  # green box
                    cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                    label_text = f"{box['class_id']}:{box['confidence']:.2f}"
                    cv2.putText(frame, label_text, (x1, y1 - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

                # Draw the centre point and its world coordinates.
                cv2.circle(frame, (center_x, center_y), 5, (0, 255, 255), -1)
                if success:
                    coord_text = f"({world_x:.1f}, {world_y:.1f}) mm"
                    cv2.putText(frame, coord_text, (center_x + 10, center_y - 5),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)

                # Record this detection.
                detections.append({
                    'center': (center_x, center_y),
                    'world_coords': (world_x, world_y),
                    'bbox': (x1, y1, x2, y2),
                    'label': box['class_id'],
                    'score': box['confidence'],
                    'contour': contour_data  # contour data (may be None)
                })
                world_coords.append((world_x, world_y))

                # Persist the contour data.
                # NOTE(review): `self.save_contour_data` here is the bound
                # *method*, which is always truthy — this condition never gates
                # anything. A boolean flag (cf. save_as_fixed /
                # save_fixed_contour) was probably intended.
                if contour_data and self.save_contour_data:
                    self.save_contour_data(contour_data, self.frame_counter)

            # Draw the fixed (pinned) contour, if any.
            if self.fixed_contour:
                frame = self.draw_fixed_contour(frame, self.fixed_contour, (orig_w, orig_h))

                # Persist the fixed contour when requested.
                if self.save_fixed_contour:
                    self.save_contour_data({
                        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
                        "fixed_contour": True,
                        "contour_points": self.fixed_contour,
                        "image_size": (orig_w, orig_h)
                    }, self.frame_counter, is_fixed=True)
                    self.save_fixed_contour = False  # one-shot flag

            return frame, detections, world_coords

        except Exception as e:
            print(f"目标检测错误: {str(e)}")
            import traceback
            traceback.print_exc()
            return frame, [], []
        
    def process_contour(self, image, box, masks, protos, img_size):
        """Build a simplified, fixed-spacing contour for one detection and draw it.

        Combines mask coefficients with prototype masks into a binary mask,
        extracts the dominant contour overlapping the detection box, applies
        curvature-adaptive simplification, then resamples the polyline at a
        fixed 5-pixel spacing.

        Returns:
            (contour_data, overlay): contour_data is a dict with
            confidence/class_id/bbox/contour_points/image_size, or None when
            no valid contour is found; overlay is the annotated image (the
            untouched input image when contour_data is None).
        """
        # Detection bounding box: centre/size -> corners.
        x, y, w, h = box['bbox']
        bbox_x1 = int(x - w/2)
        bbox_y1 = int(y - h/2)
        bbox_x2 = int(x + w/2)
        bbox_y2 = int(y + h/2)
        bbox_area = w * h

        # Mask assembly: coefficients @ flattened prototypes, sigmoid, resize.
        masks_np = np.stack(masks, axis=0)
        protos_flat = protos.reshape(protos.shape[0], -1)
        mask_output = masks_np @ protos_flat
        mask_output = 1 / (1 + np.exp(-mask_output))
        mask_output = mask_output.reshape(-1, protos.shape[1], protos.shape[2])
        m = mask_output[0]
        m = cv2.resize(m, img_size, interpolation=cv2.INTER_LINEAR)

        # Binarise at 0.5 and clean up with a morphological opening.
        _, binary_mask = cv2.threshold(m, 0.5, 255, cv2.THRESH_BINARY)
        binary_mask = binary_mask.astype(np.uint8)
        kernel = np.ones((3, 3), np.uint8)
        binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_OPEN, kernel)

        # Outer contours only.
        contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            return None, image

        # Keep contours that overlap the detection box and have plausible area.
        valid_contours = []
        for contour in contours:
            contour_x, contour_y, contour_w, contour_h = cv2.boundingRect(contour)
            contour_x2 = contour_x + contour_w
            contour_y2 = contour_y + contour_h

            overlap_x1 = max(bbox_x1, contour_x)
            overlap_y1 = max(bbox_y1, contour_y)
            overlap_x2 = min(bbox_x2, contour_x2)
            overlap_y2 = min(bbox_y2, contour_y2)

            overlap_width = max(0, overlap_x2 - overlap_x1)
            overlap_height = max(0, overlap_y2 - overlap_y1)
            overlap_area = overlap_width * overlap_height

            contour_area = cv2.contourArea(contour)

            # Require >50% box overlap and area within [0.1, 2.0] x box area.
            if overlap_area > 0.5 * bbox_area and 0.1 * bbox_area < contour_area < 2.0 * bbox_area:
                valid_contours.append(contour)

        if not valid_contours:
            return None, image

        # Largest valid contour is the main contour.
        main_contour = max(valid_contours, key=cv2.contourArea)
        contour_points = []

        # Curvature-adaptive simplification (needs at least 5 points).
        if main_contour is not None and len(main_contour) > 4:
            contour_length = cv2.arcLength(main_contour, True)
            base_epsilon = 0.005 * contour_length

            # Per-vertex turning angle (degrees) as a curvature proxy.
            curvature_scores = []
            for i in range(1, len(main_contour)-1):
                p1 = main_contour[i-1][0]
                p2 = main_contour[i][0]
                p3 = main_contour[i+1][0]

                vec1 = (p1[0]-p2[0], p1[1]-p2[1])
                vec2 = (p3[0]-p2[0], p3[1]-p2[1])

                angle1 = np.arctan2(vec1[1], vec1[0])
                angle2 = np.arctan2(vec2[1], vec2[0])
                angle_diff = np.abs(np.degrees(angle1 - angle2))
                angle_diff = min(angle_diff, 360 - angle_diff)
                curvature_scores.append(angle_diff)

            # Point-level adaptive simplification: low-curvature points may be
            # dropped via approxPolyDP over their 3-point neighbourhood.
            for i in range(len(main_contour)):
                if i == 0 or i == len(main_contour)-1:
                    contour_points.append(main_contour[i][0].tolist())
                    continue

                # Local curvature around point i.
                prev_score = curvature_scores[i-1] if i >= 1 else 0
                curr_score = curvature_scores[i] if i < len(curvature_scores) else 0
                max_curvature = max(prev_score, curr_score)

                epsilon = base_epsilon
                if max_curvature < 60:
                    start_idx = max(0, i-1)
                    end_idx = min(len(main_contour), i+2)
                    segment = main_contour[start_idx:end_idx]

                    if len(segment) > 2:
                        segment = segment.reshape(-1, 1, 2)
                        simplified = cv2.approxPolyDP(segment, epsilon, False)

                        # The middle point survives only when approxPolyDP keeps
                        # all three segment points (i.e. it is not collinear
                        # within epsilon); otherwise point i is dropped.
                        if len(simplified) == 3:
                            contour_points.append(simplified[1][0].tolist())
                    else:
                        contour_points.append(main_contour[i][0].tolist())
                else:
                    contour_points.append(main_contour[i][0].tolist())
        else:
            # Tiny contour: take the raw points as-is.
            contour_points = main_contour.squeeze().tolist()

        # Normalise to a list of [x, y] pairs (squeeze of a single point yields
        # a flat pair rather than a nested list).
        if len(contour_points) > 0 and not isinstance(contour_points[0], list):
            contour_points = [contour_points]

        # === Fixed-spacing resampling ===
        if len(contour_points) > 1:
            fixed_distance_contour = []
            # Target spacing between consecutive samples, in pixels.
            fixed_distance = 5

            # Seed with the start point.
            start_point = np.array(contour_points[0])
            fixed_distance_contour.append(start_point.tolist())
            segment_start = start_point
            remaining_distance = fixed_distance

            # Walk the polyline, emitting a sample every fixed_distance pixels.
            for i in range(1, len(contour_points)):
                current_point = np.array(contour_points[i])
                segment_vector = current_point - segment_start
                segment_length = np.linalg.norm(segment_vector)

                # Emit as many samples as fit on this segment.
                while segment_length > remaining_distance:
                    # Interpolate the next sample along the segment.
                    ratio = remaining_distance / segment_length
                    new_point = segment_start + ratio * segment_vector
                    fixed_distance_contour.append(new_point.tolist())

                    # Advance the segment start to the emitted sample.
                    segment_start = new_point
                    segment_vector = current_point - segment_start
                    segment_length = np.linalg.norm(segment_vector)
                    remaining_distance = fixed_distance
                else:
                    # while/else: runs once the loop condition fails (no break
                    # occurs here) — carry the unused remainder of this segment
                    # forward into the next one.
                    remaining_distance -= segment_length
                    segment_start = current_point

            # Close the loop when the last sample is far from the start.
            if len(fixed_distance_contour) > 2:
                last_point = np.array(fixed_distance_contour[-1])
                first_point = np.array(fixed_distance_contour[0])
                distance_to_start = np.linalg.norm(last_point - first_point)

                if distance_to_start > fixed_distance * 0.5:  # not yet closed
                    segment_vector = first_point - last_point
                    segment_length = np.linalg.norm(segment_vector)

                    # Sample along the closing segment back to the start.
                    while segment_length > remaining_distance:
                        ratio = remaining_distance / segment_length
                        new_point = last_point + ratio * segment_vector
                        fixed_distance_contour.append(new_point.tolist())
                        last_point = new_point
                        segment_vector = first_point - last_point
                        segment_length = np.linalg.norm(segment_vector)
                        remaining_distance = fixed_distance

                    # Finish exactly on the start point.
                    fixed_distance_contour.append(first_point.tolist())

            # Replace the adaptive points with the fixed-spacing samples.
            contour_points = fixed_distance_contour
            print(f"固定间距轮廓点生成 | 点数: {len(contour_points)} | 间距: {fixed_distance}像素")

        # Assemble the result payload.
        contour_data = {
            "confidence": box['confidence'],
            "class_id": box['class_id'],
            "bbox": box['bbox'],
            "contour_points": contour_points,
            "image_size": img_size
        }

        # Contour points in OpenCV drawing shape (N, 1, 2).
        contour_array = np.array(contour_points, dtype=np.int32).reshape((-1, 1, 2))

        # Annotate a copy of the input image: green box, red contour.
        overlay = image.copy()
        cv2.rectangle(overlay, (bbox_x1, bbox_y1), (bbox_x2, bbox_y2), (0, 255, 0), 2)
        cv2.drawContours(overlay, [contour_array], -1, (0, 0, 255), 3)

        # Mark and label the contour start point.
        if len(contour_points) > 0:
            start_point = tuple(map(int, contour_points[0]))
            cv2.circle(overlay, start_point, 8, (255, 0, 0), -1)
            cv2.putText(overlay, "Start", (start_point[0]+10, start_point[1]), 
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)

        cv2.putText(overlay, f"Conf: {box['confidence']:.2f}", 
                (bbox_x1, bbox_y1 - 10), 
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        # Show the final point count.
        cv2.putText(overlay, f"Points: {len(contour_points)}", 
                (10, image.shape[0] - 20), 
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

        return contour_data, overlay

    def stop(self):
        """Signal shutdown, wait briefly for the receive thread, and close windows."""
        self.stop_event.set()

        worker = self.receive_thread
        if worker and worker.is_alive():
            worker.join(timeout=2.0)

        cv2.destroyAllWindows()
        print("[SYSTEM] 摄像头检测系统已停止")

class ONNXDetectionThread(QThread):
    """Qt worker thread for ONNX model inference and automatic ArUco calibration.

    Wraps a CameraDetectionSystem and streams results back to the UI via the
    signals below.
    """
    update_frame = pyqtSignal(np.ndarray)         # emitted with each annotated video frame
    detection_result = pyqtSignal(str)            # emitted with detection result text
    detection_coords = pyqtSignal(list)           # emitted with the list of detected coordinates
    single_image_result = pyqtSignal(np.ndarray, str)  # emitted with single-image detection result
    calibration_status = pyqtSignal(str)          # emitted with calibration progress/status text
    def __init__(self, model_path, ip='0.0.0.0', port=9999):
        """Create the worker and its CameraDetectionSystem (model not loaded yet).

        Args:
            model_path: path to the ONNX model file.
            ip: bind address for the network camera stream.
            port: TCP port for the network camera stream.
        """
        super().__init__()
        self.model_path = model_path              # ONNX model file path
        self.ip = ip                              # camera stream IP address
        self.port = port                          # camera stream port
        # Underlying detection/calibration engine.
        self.detection_system = CameraDetectionSystem(model_path, server_ip=ip, server_port=port)
        self.active = False                       # thread-active flag (checked by loops)
        self.cap = None                           # OpenCV VideoCapture handle (local mode)
        self.camera_type = "local"                # frame source: "local" or "network"
        self.single_image_path = None             # when set, run() processes one image and exits
        self.model_shape = (640, 640)             # model input size (w, h); refined in load_model
        self.conf_threshold = 0.25                # detection confidence threshold
        self.num_classes = None                   # class count, derived in load_model
        self.total_frames = 0                     # total frames processed
        self.prev_frame_time = 0                  # timestamp of the previous processed frame
        self.start_time = time.time()             # worker construction time
        self.output_dir = "contour_data"          # contour-data output directory
        self.calibration_complete = False         # calibration status flag

    def load_model(self):
        """Load the ONNX model via the detection system and cache its geometry.

        Derives self.model_shape from the model input tensor, self.num_classes
        from the output head, and loads any saved calibration parameters.

        Returns:
            bool: True on success, False otherwise.
        """
        try:
            # Delegate session creation to the detection system.
            if not self.detection_system.load_model():
                return False

            # Infer the model input size from the first input tensor.
            input_info = self.detection_system.session.get_inputs()[0]
            input_shape = input_info.shape
            if len(input_shape) == 4:  # [batch, channels, height, width]
                height = input_shape[2]
                width = input_shape[3]
                self.model_shape = (width, height)
            elif len(input_shape) == 3:  # [batch, height, width]
                height = input_shape[1]
                width = input_shape[2]
                self.model_shape = (width, height)
            else:
                # Unrecognised layout: fall back to the default size.
                self.model_shape = (640, 640)

            # Derive the class count from the output head.
            # NOTE(review): this uses `output_shape[1] - 4` (detection-only
            # layout: 4 bbox values + classes) while
            # CameraDetectionSystem.load_model uses `- 5 - num_mask`
            # (segmentation layout) — confirm which matches this model.
            output_info = self.detection_system.session.get_outputs()[0]
            output_shape = output_info.shape
            if len(output_shape) >= 2:
                self.num_classes = output_shape[1] - 4
            else:
                self.num_classes = 80
            self.detection_system.load_calibration_params()
            return True
        except Exception as e:
            print(f"加载模型失败: {str(e)}")
            return False

    def run(self):
        """QThread entry point: load the model, then process either a single
        image or the continuous video stream, cleaning up on exit."""
        try:
            # Model must load before anything else.
            if not self.load_model():
                self.detection_result.emit("ONNX模型加载失败")
                return

            # One-shot mode: process a single image and exit.
            if self.single_image_path:
                self.process_single_image(self.single_image_path)
                return

            # Continuous mode.
            self.active = True

            # The network source needs its receive thread running first.
            if self.camera_type == "network":
                self.detection_system.start_receiving()

            self.process_video_stream()
        except Exception as exc:
            print(f"检测线程异常: {str(exc)}")
            import traceback
            traceback.print_exc()
        finally:
            self.cleanup_resources()
            self.active = False

    def set_camera_type(self, camera_type):
        """Select the frame source: "local" (cv2.VideoCapture) or "network" (socket stream)."""
        self.camera_type = camera_type

    def cleanup_resources(self):
        """Tear down the detection system, local camera, and OpenCV windows."""
        print("[THREAD] 清理资源")

        # Ask the detection system to shut down its receive thread.
        system = self.detection_system
        if hasattr(system, 'stop'):
            system.stop_event.set()
            worker = system.receive_thread
            if worker and worker.is_alive():
                worker.join(timeout=0.5)

        # Release the local camera handle, if one was opened.
        if self.cap and self.cap.isOpened():
            self.cap.release()
            print("[THREAD] 本地摄像头已释放")

        cv2.destroyAllWindows()

    def perform_calibration(self):
        """Kick off the ArUco calibration procedure on a background thread."""
        system = getattr(self, 'detection_system', None)
        if system is None:
            print("[ERROR] 检测系统未初始化")
            self.calibration_status.emit("标定失败：检测系统未初始化")
            return

        # Re-arm calibration if a previous run already completed.
        if system.calibration_complete:
            system.calibration_complete = False

        # Run the collection/computation loop off the UI thread.
        threading.Thread(target=self._calibration_task, daemon=True).start()
        self.calibration_status.emit("标定已启动...")

    def _calibration_task(self):
        """Background calibration worker.

        Collects up to 30 frames containing both ArUco markers (30 s timeout),
        then derives the image centre (c_x, c_y) and the mm-per-pixel ratio
        from the known 220 mm marker spacing, persists the parameters, and
        appends them to calibration_log.txt. Progress is reported through the
        calibration_status and update_frame signals.
        """
        print("[CALIBRATION] 开始自动标定")
        self.calibration_status.emit("开始标定，请确保ArUco标记在视野中")

        # Reset calibration state.
        self.detection_system.calibration_complete = False
        self.detection_system.calibration_in_progress = True
        self.detection_system.calibration_frames = []

        # Collect calibration frames.
        collected_frames = 0
        max_frames = 30
        timeout = time.time() + 30

        while collected_frames < max_frames and time.time() < timeout and self.active:
            try:
                frame = self.capture_frame()
                # BUG FIX: removed a leftover debug `print(frame)` that dumped
                # the entire ndarray to stdout on every iteration.
                if frame is None:
                    print("[CALIBRATION] 未获取到帧")
                    time.sleep(0.1)
                    continue

                # Locate the two ArUco markers (returns x1, x2, y1, y2).
                result = self.detection_system.detect_aruco_markers(frame)
                if result:
                    x1, x2, y1, y2 = result
                    self.detection_system.calibration_frames.append((x1, x2, y1, y2))
                    collected_frames += 1

                    # Report progress to the UI.
                    self.calibration_status.emit(f"采集帧: {collected_frames}/{max_frames}")

                    # Annotate a copy of the frame with progress text.
                    frame_copy = frame.copy()
                    cv2.putText(frame_copy, f"Calibration: {collected_frames}/{max_frames}",
                                (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

                    # Mark the detected marker centres.
                    cv2.circle(frame_copy, (x1, y1), 10, (0, 255, 0), 2)
                    cv2.circle(frame_copy, (x2, y2), 10, (0, 255, 0), 2)

                    self.update_frame.emit(frame_copy)
                else:
                    # No markers found: show a hint on the frame instead.
                    # NOTE(review): cv2.putText cannot render non-ASCII glyphs,
                    # so this Chinese hint will likely appear as "???".
                    frame_copy = frame.copy()
                    cv2.putText(frame_copy, "未检测到ArUco标记", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    self.update_frame.emit(frame_copy)

            except Exception as e:
                print(f"[CALIBRATION ERROR] {str(e)}")
                import traceback
                traceback.print_exc()

            time.sleep(0.05)

        # Derive calibration parameters; require at least 10 good samples.
        frames = self.detection_system.calibration_frames
        if len(frames) >= 10:
            n = len(frames)

            # Image centre = mean of all marker-centre coordinates.
            sum_x1 = sum(x1 for x1, _, _, _ in frames)
            sum_x2 = sum(x2 for _, x2, _, _ in frames)
            sum_y1 = sum(y1 for _, _, y1, _ in frames)
            sum_y2 = sum(y2 for _, _, _, y2 in frames)
            self.detection_system.c_x = (sum_x1 + sum_x2) / (n * 2.0)
            self.detection_system.c_y = (sum_y1 + sum_y2) / (n * 2.0)

            # Mean pixel distance between the two markers.
            avg_distance = 0
            for x1, x2, y1, y2 in frames:
                avg_distance += ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
            avg_distance /= n

            # The physical marker spacing is 220 mm.
            if avg_distance > 0:
                self.detection_system.ratio = 220.0 / avg_distance
            else:
                self.detection_system.ratio = 1.0

            self.detection_system.calibration_complete = True

            # Log and persist the result.
            timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            calibration_data = f"{timestamp}: c_x={self.detection_system.c_x:.2f}, c_y={self.detection_system.c_y:.2f}, ratio={self.detection_system.ratio:.4f}\n"
            with open("calibration_log.txt", "a") as f:
                f.write(calibration_data)

            self.detection_system.save_calibration_params()

            print(f"[CALIBRATION] 标定完成! {calibration_data.strip()}")
            self.calibration_status.emit("标定成功!")
        else:
            print("[CALIBRATION] 标定失败: 采集帧不足")
            self.calibration_status.emit("标定失败: 标记未检测到")

        self.detection_system.calibration_in_progress = False

    def capture_frame(self):
        """Capture a single frame from the configured source.

        Returns the frame (numpy BGR image) or None when no frame is
        available: empty network queue after retries, or the local
        camera failed to open/read.
        """
        try:
            if self.camera_type == "network":
                # Poll the shared queue with a bounded retry loop so a
                # momentarily-empty queue does not fail immediately.
                for _ in range(10):
                    if not self.detection_system.frame_queue.empty():
                        return self.detection_system.frame_queue.get()
                    time.sleep(0.05)
                return None
            else:  # local camera
                # BUG FIX: the original wrapped this in `with threading.Lock():`,
                # which constructs a brand-new lock on every call and therefore
                # serializes nothing. The no-op lock has been removed; lazy
                # opening is only driven from this thread.
                if self.cap is None:
                    # Choose the capture backend per platform.
                    # NOTE(review): device index 20 is unusual — confirm it
                    # matches the actual camera device on the target host.
                    if sys.platform.startswith('linux'):
                        self.cap = cv2.VideoCapture(20, cv2.CAP_V4L2)
                    elif sys.platform.startswith('win'):
                        self.cap = cv2.VideoCapture(20, cv2.CAP_DSHOW)
                    else:
                        self.cap = cv2.VideoCapture(20)

                    if not self.cap.isOpened():
                        print("[ERROR] 无法打开本地摄像头")
                        return None

                ret, frame = self.cap.read()
                return frame
        except Exception as e:
            print(f"捕获帧错误: {str(e)}")
            return None

    def process_video_stream(self):
        """Main video-stream processing loop.

        Pulls frames via capture_frame(), overlays calibration info, runs
        object detection, feeds contours into a FixedContourManager, and
        emits annotated frames plus detection signals until self.active
        goes False.
        """
        frame_count = 0

        # Fixed-contour accumulator: collects contours over several frames
        # and then produces one stable ("fixed") contour.
        contour_manager = FixedContourManager()
        fixed_contour = None
        fixed_contour_size = None
        # BUG FIX: the original referenced `contour_data` in the fixed-contour
        # branch before it was ever assigned there (NameError on the first
        # frame after the contour became fixed). Track the most recent
        # detection's contour here instead.
        last_contour_data = None

        # Make sure the output directory exists before any saves.
        os.makedirs(self.output_dir, exist_ok=True)
        print(f"确保输出目录已创建: {self.output_dir}")

        while self.active:
            frame = self.capture_frame()
            if frame is None:
                # No frame yet: show a placeholder "waiting" image.
                blank_frame = np.zeros((480, 640, 3), dtype=np.uint8)
                cv2.putText(blank_frame, "等待视频流...", (100, 240),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                self.update_frame.emit(blank_frame)
                time.sleep(0.1)
                continue

            processed_frame = frame.copy()
            frame_count += 1

            if contour_manager.completed and fixed_contour:
                # Fixed contour already computed: just draw it.
                processed_frame = self.detection_system.draw_fixed_contour(processed_frame, fixed_contour, fixed_contour_size)

                # Save the contour once, on the first frame after completion.
                if frame_count == contour_manager.max_frames + 1 and last_contour_data is not None:
                    self.detection_system.save_contour_data(last_contour_data, frame_count)
            else:
                # Skip regular detection while calibration is running.
                if self.detection_system.calibration_in_progress:
                    time.sleep(0.05)
                    continue

                # Before calibration completes, visualize ArUco markers.
                if not self.detection_system.calibration_complete:
                    result = self.detection_system.detect_aruco_markers(processed_frame)
                    if result:
                        x1, x2, y1, y2 = result
                        # Mark both detected marker positions on the frame.
                        cv2.circle(processed_frame, (x1, y1), 10, (0, 255, 0), 2)
                        cv2.circle(processed_frame, (x2, y2), 10, (0, 255, 0), 2)

                # Run object detection once a model is loaded and
                # calibration is done.
                if self.detection_system.session and self.detection_system.calibration_complete:
                    try:
                        processed_frame, detections, world_coords = self.detection_system.detect_objects(processed_frame)

                        # Prepare the result message for the UI.
                        message = "未检测到目标"
                        contour_data = None

                        if detections:
                            objects = [f"{det['label']}:{det['score']:.2f}" for det in detections]
                            message = ', '.join(objects)

                            # Contour data of the top detection, if present.
                            if 'contour' in detections[0]:
                                contour_data = detections[0]['contour']
                                last_contour_data = contour_data

                                contour_manager.add_contour(contour_data)

                                # Once the manager has enough frames, fetch
                                # the fixed contour exactly once.
                                if contour_manager.completed and not fixed_contour:
                                    fixed_contour, fixed_contour_size = contour_manager.get_fixed_contour()
                                    print(f"\n固定轮廓已生成 | 点数: {len(fixed_contour)}")
                                    self.detection_system.fixed_contour = fixed_contour

                                self.detection_system.save_contour_data(contour_data, frame_count)
                                print(f"\n检测到目标 | 类别: {contour_data['class_id']} | 置信度: {contour_data['confidence']:.2f}")

                        self.detection_result.emit(message)
                        self.detection_coords.emit(world_coords)
                    except Exception as e:
                        print(f"目标检测错误: {str(e)}")
                        import traceback
                        traceback.print_exc()

            # Calibration status overlay.
            status_text = "Calibration: " + (
                "COMPLETE" if self.detection_system.calibration_complete
                else "IN PROGRESS" if self.detection_system.calibration_in_progress
                else "INCOMPLETE"
            )
            status_color = (0, 255, 0) if self.detection_system.calibration_complete else (0, 0, 255)
            cv2.putText(processed_frame, status_text, (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, status_color, 2)

            # Pixel-to-world conversion parameters, once calibrated.
            if self.detection_system.calibration_complete:
                params_text = f"c_x:{self.detection_system.c_x:.1f} c_y:{self.detection_system.c_y:.1f} r:{self.detection_system.ratio:.4f}"
                cv2.putText(processed_frame, params_text, (10, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)

            status_text = "Fixed" if fixed_contour else f"Collecting: {len(contour_manager.raw_contours)}/{contour_manager.max_frames}"

            # Frame counter / collection status overlays.
            cv2.putText(processed_frame, f"Frame: {frame_count}",
                    (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
            cv2.putText(processed_frame, f"Status: {status_text}",
                    (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

            # Point count of the most recent raw contour.
            if contour_manager.raw_contours:
                points_count = len(contour_manager.raw_contours[-1]['contour_points'])
                cv2.putText(processed_frame, f"Points: {points_count}",
                        (10, processed_frame.shape[0] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

            self.update_frame.emit(processed_frame)

            time.sleep(0.01)

    def process_single_image(self, img_path):
        """Run detection on a single image file and emit the annotated result.

        Reads the image, runs inference + contour extraction, pushes the
        contour through a one-frame FixedContourManager, saves contour data,
        and finally emits (annotated_frame, result_text) on
        self.single_image_result. On any error, emits (None, error_message).

        NOTE(review): this method calls `self.load_model()`,
        `self.save_contour_data(...)` and `self.draw_fixed_contour(...)`,
        while the video path uses `self.detection_system.*` for the same
        operations — confirm that both sets of methods exist on this class.
        """
        try:
            frame = cv2.imread(img_path)
            if frame is None:
                raise ValueError(f"无法读取图像: {img_path}")
            
            # Single-shot contour manager: completes after one frame.
            contour_manager = FixedContourManager(max_frames=1)
            fixed_contour = None
            fixed_contour_size = None  # NOTE(review): assigned below but never used afterwards

            # Make sure the detection model is loaded.
            if not self.detection_system.session:
                self.load_model()

            # Record the original image size as (width, height).
            orig_h, orig_w = frame.shape[:2]
            img_size = (orig_w, orig_h)
            
            # New inference pipeline: raw outputs, then contour extraction.
            boxes, masks, protos, _ = self.detection_system.infer(frame)
            contour_data, processed_frame = self.detection_system.extract_contour_data(frame, boxes, masks, protos, img_size)
            
            # Feed the contour into the manager (completes immediately here).
            if contour_data:
                contour_manager.add_contour(contour_data)
                print(f"检测到目标 | 类别: {contour_data['class_id']} | 置信度: {contour_data['confidence']:.2f}")
                
                # Persist the raw contour; frame number 0 marks single-image mode.
                frame_count = 0  # single-frame processing uses 0 as the frame number
                self.save_contour_data(contour_data, frame_count, False)
                
                # Once the manager is done, fetch and draw the fixed contour.
                if contour_manager.completed and not fixed_contour:
                    fixed_contour, fixed_contour_size = contour_manager.get_fixed_contour()
                    print(f"\n生成固定轮廓 | 点数: {len(fixed_contour)}")
                    
                    # Overlay the fixed contour on the annotated frame.
                    processed_frame = self.draw_fixed_contour(processed_frame, fixed_contour, img_size)
                    
                    # Persist the fixed contour as its own record.
                    fixed_contour_data = {
                        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
                        "confidence": 1.0,
                        "class_id": contour_manager.tracking_id or -1,
                        "bbox": [0, 0, img_size[0], img_size[1]],
                        "contour_points": fixed_contour,
                        "image_size": img_size,
                        "points_count": len(fixed_contour)
                    }
                    self.save_contour_data(fixed_contour_data, frame_count, True)
            
            # Build the human-readable result text.
            result_text = "未检测到目标"
            if contour_data:
                objects = [f"{contour_data['class_id']}:{contour_data['confidence']:.2f}"]
                result_text = ', '.join(objects)
                
                # Append the contour point count.
                result_text += f" | 轮廓点: {len(contour_data['contour_points'])}"
            
            # Overlay file name and result text on the image.
            cv2.putText(processed_frame, f"图像: {os.path.basename(img_path)}", 
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
            cv2.putText(processed_frame, f"结果: {result_text}", 
                    (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
            
            # Show the point count in the bottom-left corner.
            if contour_data:
                points_count = len(contour_data['contour_points'])
                cv2.putText(processed_frame, f"Points: {points_count}", 
                        (10, processed_frame.shape[0] - 20), 
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

            # Hand the result to the UI thread.
            self.single_image_result.emit(processed_frame, result_text)

        except Exception as e:
            error_msg = f"处理图像时出错: {str(e)}"
            print(error_msg)
            import traceback
            traceback.print_exc()
            self.single_image_result.emit(None, error_msg)

    def extract_contour_data(self, image, boxes, masks, protos, img_size):
        """Extract the main object contour from segmentation outputs.

        Returns (contour_data, annotated_image); (None, image) when there
        is no detection or the binarized mask contains no contour.
        """
        if not boxes:
            return None, image

        # Decode mask coefficients against the prototype masks, then apply
        # a sigmoid to obtain per-pixel probabilities.
        coeffs = np.stack(masks, axis=0)
        proto_mat = protos.reshape(protos.shape[0], -1)
        logits = coeffs @ proto_mat
        probs = 1 / (1 + np.exp(-logits))
        probs = probs.reshape(-1, protos.shape[1], protos.shape[2])
        prob_map = cv2.resize(probs[0], img_size, interpolation=cv2.INTER_LINEAR)

        # Binarize at probability 0.5.
        _, mask_bin = cv2.threshold(prob_map, 0.5, 255, cv2.THRESH_BINARY)
        mask_bin = mask_bin.astype(np.uint8)

        # Keep only the largest external contour.
        found, _ = cv2.findContours(mask_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not found:
            return None, image
        biggest = max(found, key=cv2.contourArea)

        # Polygon simplification with a tolerance of 0.5% of the perimeter.
        tol = 0.005 * cv2.arcLength(biggest, True)
        poly = cv2.approxPolyDP(biggest, tol, True)
        points = [pt[0].tolist() for pt in poly]

        contour_data = {
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            "confidence": boxes[0]['confidence'],
            "class_id": boxes[0]['class_id'],
            "bbox": boxes[0]['bbox'],
            "contour_points": points,
            "image_size": img_size
        }

        # Bounding-box corners from the (cx, cy, w, h) detection box.
        cx, cy, bw, bh = boxes[0]['bbox']
        top_left = (int(cx - bw / 2), int(cy - bh / 2))
        bottom_right = (int(cx + bw / 2), int(cy + bh / 2))

        # Draw box, contour, and point count on a copy of the input image.
        annotated = image.copy()
        cv2.rectangle(annotated, top_left, bottom_right, (0, 255, 0), 2)
        contour_array = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
        cv2.drawContours(annotated, [contour_array], -1, (0, 0, 255), 3)
        cv2.putText(annotated, f"Points: {len(points)}", 
                (10, image.shape[0] - 20), 
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

        return contour_data, annotated

    def set_single_image(self, image_path):
        """Remember the file path for the next single-image detection run."""
        self.single_image_path = image_path

    def calibrate_single_image(self, img_path):
        """Calibrate pixel-to-world parameters from a single image.

        Detects the two ArUco markers, derives the centre point (c_x, c_y)
        and mm-per-pixel ratio from their known 220 mm separation, appends
        the result to calibration_log.txt, and emits status/frame signals.

        Returns True on success, False otherwise.
        """
        try:
            frame = cv2.imread(img_path)
            if frame is None:
                raise ValueError(f"无法读取图像: {img_path}")

            self.calibration_status.emit("开始标定单张图像")

            # Reset calibration state before processing.
            self.detection_system.calibration_complete = False
            self.detection_system.calibration_in_progress = True
            self.detection_system.calibration_frames = []

            # Detect the ArUco markers on a working copy of the image.
            processed_frame = frame.copy()
            result = self.detection_system.detect_aruco_markers(processed_frame)

            if not result:
                self.calibration_status.emit("标定失败: 未检测到ArUco标记")
                return False

            x1, x2, y1, y2 = result
            self.detection_system.calibration_frames.append((x1, x2, y1, y2))

            # Marker midpoint becomes the calibration centre.
            self.detection_system.c_x = (x1 + x2) / 2.0
            self.detection_system.c_y = (y1 + y2) / 2.0

            # Real-world marker separation is 220 mm.
            distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
            if distance > 0:
                self.detection_system.ratio = 220.0 / distance
            else:
                self.detection_system.ratio = 1.0

            self.detection_system.calibration_complete = True

            # Append the result to the calibration log file.
            timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            calibration_data = f"{timestamp}: c_x={self.detection_system.c_x:.2f}, c_y={self.detection_system.c_y:.2f}, ratio={self.detection_system.ratio:.4f}\n"
            with open("calibration_log.txt", "a") as f:
                f.write(calibration_data)

            print(f"[CALIBRATION] 标定完成! {calibration_data.strip()}")
            self.calibration_status.emit("标定成功!")

            # Overlay the calibration result on the image.
            cv2.putText(processed_frame, "Calibration: COMPLETE",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            params_text = f"c_x:{self.detection_system.c_x:.1f} c_y:{self.detection_system.c_y:.1f} r:{self.detection_system.ratio:.4f}"
            cv2.putText(processed_frame, params_text, (10, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)

            self.update_frame.emit(processed_frame)
            # Re-arm detection now that the calibration changed.
            self.detection_system.reset_for_detection()

            return True

        except Exception as e:
            print(f"[CALIBRATION ERROR] {str(e)}")
            self.calibration_status.emit(f"标定失败: {str(e)}")
            return False
        finally:
            # BUG FIX: the original never cleared calibration_in_progress on
            # any path (failure OR success), which left process_video_stream
            # permanently skipping detection after a single-image calibration.
            self.detection_system.calibration_in_progress = False

    def stop(self):
        """Thread-safe stop: flag the processing loop to exit, release held
        resources, then wait up to 2 s for the thread to finish."""
        self.active = False
        self.cleanup_resources()
        self.wait(2000)

class MotorController:
    """Modbus-RTU motor controller over a serial line.

    Every motion command is a single-register write (function code 0x06);
    frames are assembled in _create_command and protected with
    CRC-16/MODBUS (low byte transmitted first).
    """

    def __init__(self, port='/dev/ttyCH343USB0', baudrate=115200, address=0x00):
        """Open the serial port and prepare the CRC function."""
        self.ser = serial.Serial(
            port=port,                    # serial device path
            baudrate=baudrate,            # communication baud rate
            bytesize=serial.EIGHTBITS,    # 8 data bits
            parity=serial.PARITY_NONE,    # no parity
            stopbits=serial.STOPBITS_ONE, # 1 stop bit
            timeout=0.1                   # read timeout, seconds
        )
        self.address = address            # Modbus slave address
        # CRC-16/MODBUS: poly 0x8005 (reflected input), init 0xFFFF, no final XOR.
        self.crc16 = crcmod.mkCrcFun(
            0x18005,
            rev=True,
            initCrc=0xFFFF,
            xorOut=0x0000
        )

    def _create_command(self, function_code, register, value):
        """Build a Modbus RTU frame: addr, fc, reg hi/lo, value hi/lo, CRC lo/hi."""
        frame = bytearray([
            self.address,
            function_code,
            (register >> 8) & 0xFF,
            register & 0xFF,
            (value >> 8) & 0xFF,
            value & 0xFF
        ])
        crc = self.crc16(frame)
        # Modbus RTU transmits the CRC low byte first.
        frame.append(crc & 0xFF)
        frame.append((crc >> 8) & 0xFF)
        return frame

    def send_command(self, command):
        """Send a frame and return the (up to) 8-byte response."""
        self.ser.write(command)
        time.sleep(0.05)  # give the drive time to answer
        return self.ser.read(8)

    def _write_register(self, register, value):
        """Write a single holding register (fc 0x06) and return the response.

        Factored out of the motion commands below, which previously
        duplicated the create-then-send sequence five times.
        """
        return self.send_command(self._create_command(0x06, register, value))

    def start(self):
        """Start the motor."""
        return self._write_register(0x0052, 0x0001)

    def stop(self):
        """Reset/stop the motor."""
        return self._write_register(0x0050, 0x0001)

    def forward(self):
        """Run the motor forward."""
        return self._write_register(0x0054, 0x0001)

    def reverse(self):
        """Run the motor in reverse."""
        return self._write_register(0x0056, 0x0001)

    def emergency_stop(self):
        """Emergency stop."""
        return self._write_register(0x0058, 0x0001)

    def set_speed(self, speed):
        """Set motor speed (0-500). Raises ValueError outside that range."""
        if not 0 <= speed <= 500:
            raise ValueError("速度值必须在0-500范围内")
        return self._write_register(0x005A, speed)

    def close(self):
        """Close the serial connection."""
        self.ser.close()

class RobotConnection:
    """Robot-arm connection manager with heartbeat-based auto-reconnect."""

    def __init__(self, ip, port):
        self.ip = ip                    # robot IP address (used for logging)
        self.port = port                # robot port (used for logging)
        self.mc = None                  # robot connection object
        self.connected = False          # cached connection state
        self.heartbeat_active = True    # heartbeat loop enable flag
        self.heartbeat_thread = None    # heartbeat thread object
        self.reconnect_queue = Queue()  # wait-for-reconnect queue
        self.lock = threading.Lock()    # guards access to self.mc
        # BUG FIX: these two attributes must exist BEFORE the heartbeat
        # thread starts; the original defined them after start_heartbeat(),
        # so heartbeat_check could hit AttributeError reading
        # self.reconnecting during startup.
        self.reconnect_lock = threading.Lock()  # serializes reconnect attempts
        self.reconnecting = False       # reconnect-in-progress flag
        self.connect()                  # attempt the first connection now
        self.start_heartbeat()          # start the heartbeat thread

    def connect(self):
        """Try to connect to the robot arm; returns True on success."""
        try:
            with self.lock:
                if self.mc:
                    try:
                        self.mc.close()
                    except:
                        pass

                # NOTE(review): the socket transport is commented out and a
                # direct serial connection is used; self.ip/self.port are
                # only used in the log message here.
                # self.mc = MyCobot280Socket(self.ip, self.port)
                self.mc = MyCobot280("/dev/ttyAMA0", 1000000)
                test_result = self.mc.is_controller_connected()
                if test_result in [0, 1]:
                    self.connected = True
                    print(f"成功连接到机械臂: {self.ip}:{self.port}")
                    return True
                else:
                    print(f"连接测试失败: {test_result}")
                    return False
        except Exception as e:
            print(f"连接失败: {str(e)}")
            self.connected = False
            return False

    def disconnect(self):
        """Close the connection; returns True on success."""
        try:
            with self.lock:
                if self.mc:
                    self.mc.close()
                self.connected = False
                print("已断开机械臂连接")
                return True
        except:
            return False

    def reconnect(self):
        """Disconnect then reconnect; only one attempt runs at a time."""
        with self.reconnect_lock:
            if self.reconnecting:
                print("重连操作已在其他线程进行")
                return False

            try:
                self.reconnecting = True
                self.disconnect()
                time.sleep(0.5)
                result = self.connect()
                return result
            finally:
                self.reconnecting = False

    def start_heartbeat(self):
        """Start the heartbeat thread if it is not already running."""
        if not self.heartbeat_thread or not self.heartbeat_thread.is_alive():
            self.heartbeat_active = True
            self.heartbeat_thread = threading.Thread(target=self.heartbeat_check, daemon=True)
            self.heartbeat_thread.start()
            print("心跳检测已启动")

    def stop_heartbeat(self):
        """Stop the heartbeat thread (waits up to 2 s for it to exit)."""
        self.heartbeat_active = False
        if self.heartbeat_thread and self.heartbeat_thread.is_alive():
            self.heartbeat_thread.join(timeout=2.0)
        print("心跳检测已停止")

    def heartbeat_check(self):
        """Heartbeat loop: poll the controller every 5 s, reconnect on failure."""
        while self.heartbeat_active:
            time.sleep(5)

            # Skip the check while a reconnect is already running.
            if self.reconnecting:
                continue

            try:
                if not self.mc:
                    self.connected = False
                    print("机械臂对象未初始化")
                    continue

                status = self.mc.is_controller_connected()
                if status != 1:
                    print(f"心跳检测失败: {status}")
                    self.connected = False

                    # BUG FIX: the original acquired reconnect_lock here and
                    # then called reconnect(), which re-acquires the same
                    # non-reentrant lock — a guaranteed self-deadlock.
                    # reconnect() already serializes itself via that lock,
                    # so just call it.
                    print("开始重新连接...")
                    self.reconnect()
                else:
                    self.connected = True
            except Exception as e:
                print(f"心跳检测异常: {str(e)}")
                self.connected = False

    def is_connected(self):
        """Return the cached connection state."""
        return self.connected

    def get_robot(self):
        """Return the robot object, waiting up to 30 s for a reconnect.

        Raises ConnectionError when no reconnect signal arrives in time.
        """
        if not self.connected:
            print("等待重新连接...")
            # BUG FIX: the original let queue.Empty escape from the timed
            # get(); translate it into the ConnectionError callers expect.
            try:
                if not self.reconnect_queue.get(timeout=30):
                    raise ConnectionError("无法重新连接机械臂")
            except queue.Empty:
                raise ConnectionError("无法重新连接机械臂") from None
        with self.lock:
            return self.mc

class ManualControlDialog(QDialog):
    """手动控制对话框"""

    def __init__(self, connection, parent=None):
        """Manual-control dialog for the robot arm.

        Args:
            connection: RobotConnection-like object used to reach the arm.
            parent: optional parent widget.
        """
        super().__init__(parent)
        self.setWindowTitle("手动控制模式")
        self.setGeometry(200, 200, 500, 600)
        self.setStyleSheet("""
            QDialog {
                background-color: #2D2D30;
                color: #FFFFFF;
                font-family: Segoe UI;
            }
            QGroupBox {
                border: 1px solid #007ACC;
                border-radius: 6px;
                margin-top: 10px;
                padding-top: 15px;
                font-weight: bold;
            }
            QLabel {
                color: #CCCCCC;
            }
            QPushButton {
                background-color: #007ACC;
                color: white;
                border: none;
                border-radius: 4px;
                padding: 6px 12px;
                font-weight: bold;
            }
            QPushButton:hover {
                background-color: #1C97EA;
            }
            QPushButton:disabled {
                background-color: #505050;
                color: #A0A0A0;
            }
        """)

        self.connection = connection
        self.init_ui()
        # Refresh all status displays shortly after the dialog is shown.
        QTimer.singleShot(100, self.update_all_status)

        # Periodic (5 s) refresh of the per-joint status labels.
        self.status_timer = QTimer(self)
        self.status_timer.timeout.connect(self.update_joint_status)
        self.status_timer.start(5000) 

    def init_ui(self):
        """Build the dialog UI: joint controls, free-move controls, the
        save-point button, and the bottom action bar."""
        main_layout = QVBoxLayout()
        main_layout.setSpacing(15)
        main_layout.setContentsMargins(15, 15, 15, 15)

        # --- Joint control area ---
        joint_group = QGroupBox("关节控制")
        joint_layout = QGridLayout()
        joint_layout.setHorizontalSpacing(10)
        joint_layout.setVerticalSpacing(10)

        # One row per joint: label, enable button, release button, status label.
        self.joint_btns = []
        for i in range(1, 7):
            label = QLabel(f"关节 {i}:")
            label.setAlignment(Qt.AlignCenter)

            enable_btn = QPushButton("使能")
            enable_btn.setFixedHeight(30)

            release_btn = QPushButton("放松")
            release_btn.setFixedHeight(30)

            status_label = QLabel("状态: 未知")

            self.joint_btns.append({
                'enable_btn': enable_btn,
                'release_btn': release_btn,
                'status_label': status_label
            })

            # Bind the 1-based joint index via a lambda default argument.
            enable_btn.clicked.connect(lambda _, idx=i: self.focus_joint(idx))
            release_btn.clicked.connect(lambda _, idx=i: self.release_joint(idx))

            row = i - 1
            joint_layout.addWidget(label, row, 0)
            joint_layout.addWidget(enable_btn, row, 1)
            joint_layout.addWidget(release_btn, row, 2)
            joint_layout.addWidget(status_label, row, 3)

        # Whole-arm buttons.
        # NOTE(review): the variable is named release_2_6_btn but both its
        # label and the joint list cover joints 1-6 — confirm which is intended.
        all_joints_layout = QHBoxLayout()
        enable_all_btn = QPushButton("使能所有关节")
        enable_all_btn.setFixedHeight(35)
        release_2_6_btn = QPushButton("放松1-6关节")
        release_2_6_btn.setFixedHeight(35)

        enable_all_btn.clicked.connect(self.power_on_all_joints)
        release_2_6_btn.clicked.connect(lambda: self.release_joints([1, 2, 3, 4, 5, 6]))

        all_joints_layout.addWidget(enable_all_btn)
        all_joints_layout.addWidget(release_2_6_btn)

        joint_layout.addLayout(all_joints_layout, 6, 0, 1, 4)

        joint_group.setLayout(joint_layout)

        # --- Free-move mode controls ---
        free_group = QGroupBox("自由移动模式")
        free_layout = QVBoxLayout()
        free_layout.setSpacing(10)

        self.free_mode_switch = QPushButton("点击按钮获取状态")
        self.free_mode_switch.setFixedHeight(40)
        self.free_mode_switch.clicked.connect(self.toggle_free_mode)

        check_status_btn = QPushButton("检查自由移动状态")
        check_status_btn.setFixedHeight(35)
        check_status_btn.clicked.connect(self.update_free_mode_status)

        free_layout.addWidget(self.free_mode_switch)
        free_layout.addWidget(check_status_btn)
        free_group.setLayout(free_layout)

        # --- Save-point controls ---
        save_group = QGroupBox("保存点")
        save_layout = QHBoxLayout()
        save_point_btn = QPushButton("保存当前点位")
        save_point_btn.setFixedHeight(40)
        save_point_btn.clicked.connect(self.save_current_point)
        save_layout.addWidget(save_point_btn)
        save_group.setLayout(save_layout)

        # --- Bottom action bar ---
        bottom_layout = QHBoxLayout()
        exit_btn = QPushButton("退出手动模式")
        exit_btn.setFixedHeight(40)
        exit_btn.setStyleSheet("background-color: #FF4500;")
        exit_btn.clicked.connect(self.exit_manual_mode)

        refresh_btn = QPushButton("刷新状态")
        refresh_btn.setFixedHeight(35)
        refresh_btn.clicked.connect(self.update_all_status)

        bottom_layout.addWidget(refresh_btn)
        bottom_layout.addStretch()
        bottom_layout.addWidget(exit_btn)

        # Assemble the main layout.
        main_layout.addWidget(joint_group)
        main_layout.addWidget(free_group)
        main_layout.addWidget(save_group)
        main_layout.addLayout(bottom_layout)

        self.setLayout(main_layout)
        # NOTE(review): __init__ also schedules update_all_status via
        # QTimer.singleShot, so this immediate call is duplicated — confirm
        # whether both are needed.
        self.update_all_status()

    def update_all_status(self):
        """Refresh both the per-joint status labels and the free-mode button."""
        self.update_joint_status()
        self.update_free_mode_status()

    def update_joint_status(self):
        """Refresh each joint's voltage/temperature label and button states.

        Shows "未连接" on every joint when the arm is not connected; on a
        query failure, shows "获取失败" instead.
        """
        if not self.connection or not self.connection.is_connected():
            for i in range(1, 7):
                self.joint_btns[i - 1]['status_label'].setText("状态: 未连接")
            return

        try:
            mc = self.connection.get_robot()
            voltages = mc.get_servo_voltages()
            temps = mc.get_servo_temps()

            for i in range(1, 7):
                enabled = mc.is_servo_enable(i) == 1

                # Voltage / temperature for this joint, if the lists are long enough.
                voltage = "N/A"
                temp = "N/A"
                if voltages and len(voltages) >= i:
                    voltage = voltages[i - 1]
                if temps and len(temps) >= i:
                    temp = temps[i - 1]

                status_text = f"电压: {voltage}V, 温度: {temp}°C"
                self.joint_btns[i - 1]['status_label'].setText(status_text)

                # NOTE(review): both buttons are enabled only when the servo is
                # already enabled — one might expect the "enable" button to be
                # active when the servo is *disabled*; confirm intent.
                self.joint_btns[i - 1]['enable_btn'].setEnabled(enabled)
                self.joint_btns[i - 1]['release_btn'].setEnabled(enabled)

        except Exception as e:
            print(f"更新关节状态出错: {str(e)}")
            # Fall back to an error label on every joint.
            for i in range(1, 7):
                self.joint_btns[i - 1]['status_label'].setText("状态: 获取失败")

    def update_free_mode_status(self):
        """Query the arm's free-move mode and restyle the toggle button.

        Green = enabled, red = disabled, grey = unknown/not connected.
        NOTE(review): this pops a warning dialog when not connected even
        though it is also called from update_all_status — confirm whether a
        silent label update would be preferable there.
        """
        if not self.connection or not self.connection.is_connected():
            QMessageBox.warning(self, "未连接", "机械臂未连接")
            self.free_mode_switch.setText("状态: 未连接")
            self.free_mode_switch.setStyleSheet("background-color: #505050;")
            return

        try:
            mc = self.connection.get_robot()
            status = mc.is_free_mode() 
            if status == 1:
                self.free_mode_switch.setText("自由移动模式: 已启用 (点击关闭)")
                self.free_mode_switch.setStyleSheet("background-color: #00FF00; color: black;")
            elif status == 0:
                self.free_mode_switch.setText("自由移动模式: 已禁用 (点击开启)")
                self.free_mode_switch.setStyleSheet("background-color: #FF0000; color: white;")
            else:
                self.free_mode_switch.setText("状态未知")
                self.free_mode_switch.setStyleSheet("background-color: #505050;")
        except Exception as e:
            QMessageBox.critical(self, "错误", f"获取自由移动状态失败: {str(e)}")
            self.free_mode_switch.setText("状态: 错误")
            self.free_mode_switch.setStyleSheet("background-color: #FF4500;")

    def toggle_free_mode(self):
        """Flip the robot's free-move mode and report the outcome to the user."""
        if not self.connection or not self.connection.is_connected():
            QMessageBox.warning(self, "未连接", "机械臂未连接")
            return

        try:
            mc = self.connection.get_robot()
            # Decide the target mode from the current one: any status other
            # than 1 (including unknown) leads to enabling free mode, exactly
            # as the original if/else did.
            turning_off = mc.is_free_mode() == 1
            target = 0 if turning_off else 1
            verb = "禁用" if turning_off else "启用"
            result = mc.set_free_mode(target)
            if result == 1:
                self.update_free_mode_status()
                QMessageBox.information(self, "成功", f"自由移动模式已{verb}")
            else:
                QMessageBox.warning(self, "失败", f"{verb}自由移动模式失败 (返回值: {result})")
        except Exception as e:
            QMessageBox.critical(self, "错误", f"设置自由移动模式失败: {str(e)}")

    def focus_joint(self, joint_id):
        """Power on (focus) a single servo; *joint_id* must be in 1..6."""
        connected = self.connection is not None and self.connection.is_connected()
        if not connected:
            QMessageBox.warning(self, "未连接", "机械臂未连接")
            return

        try:
            robot = self.connection.get_robot()
            if not (1 <= joint_id <= 6):
                QMessageBox.warning(self, "错误", "无效的关节ID (1-6)")
                return
            result = robot.focus_servo(joint_id)
            if result == 1:
                self.update_joint_status()
                QMessageBox.information(self, "成功", f"关节 {joint_id} 已上电")
            else:
                QMessageBox.warning(self, "失败", f"上电关节 {joint_id} 失败 (返回值: {result})")
        except Exception as e:
            QMessageBox.critical(self, "错误", f"上电关节时发生错误: {str(e)}")

    def release_joint(self, joint_id):
        """Release (de-energize) a single servo joint.

        Args:
            joint_id: Joint number; must be in 1..6.

        Shows a message box with the outcome. Bug fix: the return value of
        ``release_servo`` was previously ignored and success was always
        reported; it is now checked, mirroring ``focus_joint``.
        """
        if not self.connection or not self.connection.is_connected():
            QMessageBox.warning(self, "未连接", "机械臂未连接")
            return

        try:
            mc = self.connection.get_robot()
            if 1 <= joint_id <= 6:
                result = mc.release_servo(joint_id)
                if result == 1:
                    self.update_joint_status()
                    QMessageBox.information(self, "成功", f"关节 {joint_id} 已放松")
                else:
                    QMessageBox.warning(self, "失败", f"放松关节 {joint_id} 失败 (返回值: {result})")
            else:
                QMessageBox.warning(self, "错误", "无效的关节ID (1-6)")
        except Exception as e:
            QMessageBox.critical(self, "错误", f"放松关节时发生错误: {str(e)}")

    def power_on_all_joints(self):
        """Power on every servo joint via ``focus_all_servos`` and report the result."""
        connected = self.connection is not None and self.connection.is_connected()
        if not connected:
            QMessageBox.warning(self, "未连接", "机械臂未连接")
            return

        try:
            robot = self.connection.get_robot()
            ok = robot.focus_all_servos() == 1
            if ok:
                self.update_joint_status()
                QMessageBox.information(self, "成功", "所有关节已上电")
            else:
                result = 0 if ok else robot_result if False else None  # unreachable placeholder
        except Exception as e:
            QMessageBox.critical(self, "错误", f"上电所有关节时发生错误: {str(e)}")

    def release_joints(self, joint_ids):
        """Release (de-energize) each joint in *joint_ids* (valid values 1..6).

        Per-joint failures are logged to stdout and collected; one summary
        message box is shown at the end.

        Bug fix: the summary branches were inverted — total success
        (``all(results)``) previously displayed the "partial failure"
        warning, while any failure displayed the success message. The
        severities (information vs. warning) were also swapped.
        """
        results = []

        for joint_id in joint_ids:
            try:
                mc = self.connection.get_robot()
                if 1 <= joint_id <= 6:
                    result = mc.release_servo(joint_id)
                    if result == 1:
                        results.append(True)
                    else:
                        results.append(False)
                        print(f"放松关节 {joint_id} 失败 (返回值: {result})")
                else:
                    # Out-of-range ids are logged but not counted as failures,
                    # preserving the original behavior.
                    print(f"无效的关节ID: {joint_id}")
            except Exception as e:
                print(f"放松关节 {joint_id} 时出错: {str(e)}")
                results.append(False)

        # Refresh the per-joint status display after the batch.
        self.update_joint_status()

        # Report the overall outcome (corrected branch order).
        if all(results):
            QMessageBox.information(self, "操作完成", "已放松1-6关节")
        else:
            QMessageBox.warning(self, "操作警告", "部分关节放松失败，请查看日志")

    def save_current_point(self):
        """Prompt for a name and save the robot's current pose as a teach point.

        Bug fixes:
        - Cancelling the input dialog no longer shows the "invalid name"
          warning; it now simply returns.
        - The point is handed to the parent for persistence *before* the
          "saved" confirmation box is shown, so the message no longer claims
          success ahead of the actual save.
        """
        name, ok = QInputDialog.getText(self, "保存点位", "输入点位名称:")
        if not ok:
            # User cancelled the dialog — nothing to do.
            return
        if not name:
            QMessageBox.warning(self, "输入错误", "请输入有效的点位名称")
            return

        try:
            mc = self.connection.get_robot()
            coords = mc.get_coords()
            angles = mc.get_angles()

            if not angles:
                QMessageBox.warning(self, "获取位置失败", "无法获取机器人当前位置")
                return

            point = {
                'name': name,
                'coords': coords,
                'angles': angles,
                # 'positions' duplicates 'angles' for backward compatibility
                # with older readers of the teach-point file.
                'positions': angles,
                'timestamp': time.strftime("%Y-%m-%d %H:%M:%S")
            }

            # Debug aid: print the current world-frame coordinates.
            world_coords = mc.get_coords()
            print("Current world coordinates:", world_coords)

            if self.parent():
                self.parent().save_teach_point_object(point)
            QMessageBox.information(self, "保存成功", f"点位 '{name}' 已保存")
        except Exception as e:
            QMessageBox.critical(self, "保存错误", f"保存示教点时出错: {str(e)}")

    def exit_manual_mode(self):
        """Leave manual mode: disable free-move, re-power all joints, close dialog."""
        connected = self.connection is not None and self.connection.is_connected()
        if connected:
            try:
                robot = self.connection.get_robot()
                # Make sure the arm is no longer in free-move before closing.
                if robot.is_free_mode() == 1:
                    robot.set_free_mode(0)
                    print("自由移动模式已禁用")
                # Re-energize every joint so the arm holds position.
                self.power_on_all_joints()
            except Exception as e:
                print(f"退出手动模式时出错: {str(e)}")

        self.accept()

class MotorControlDialog(QDialog):
    """Dialog for connecting to and driving the grinding-motor controller.

    Improvements over the previous version:
    - The five near-identical command methods (start/stop/forward/reverse/
      emergency stop) now share a single ``_run_motor_command`` helper
      instead of duplicating the try/except/report boilerplate.
    - The two bare ``except:`` clauses around ``motor.close()`` now catch
      ``Exception`` only, so KeyboardInterrupt/SystemExit are not swallowed.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle("电机控制")
        self.setGeometry(300, 300, 400, 400)
        self.motor = None  # MotorController instance while connected, else None
        self.init_ui()

    def init_ui(self):
        """Build the status, basic-control, and speed sections plus the connect button."""
        main_layout = QVBoxLayout()
        main_layout.setSpacing(15)
        main_layout.setContentsMargins(15, 15, 15, 15)

        # Status indicator area
        status_group = QGroupBox("电机状态")
        status_layout = QVBoxLayout()

        self.status_label = QLabel("未连接")
        self.status_label.setAlignment(Qt.AlignCenter)
        self.status_label.setStyleSheet("font-size: 14px; font-weight: bold;")

        status_layout.addWidget(self.status_label)
        status_group.setLayout(status_layout)

        # Basic control area
        control_group = QGroupBox("基本控制")
        control_layout = QGridLayout()

        self.start_button = QPushButton("启动")
        self.start_button.clicked.connect(self.start_motor)

        self.stop_button = QPushButton("停止")
        self.stop_button.clicked.connect(self.stop_motor)

        self.forward_button = QPushButton("正转")
        self.forward_button.clicked.connect(self.forward_motor)

        self.reverse_button = QPushButton("反转")
        self.reverse_button.clicked.connect(self.reverse_motor)

        self.emergency_button = QPushButton("急停")
        self.emergency_button.setStyleSheet("background-color: #FF4500;")
        self.emergency_button.clicked.connect(self.emergency_stop)

        control_layout.addWidget(self.start_button, 0, 0)
        control_layout.addWidget(self.stop_button, 0, 1)
        control_layout.addWidget(self.forward_button, 1, 0)
        control_layout.addWidget(self.reverse_button, 1, 1)
        control_layout.addWidget(self.emergency_button, 2, 0, 1, 2)

        control_group.setLayout(control_layout)

        # Speed control area
        speed_group = QGroupBox("速度控制")
        speed_layout = QVBoxLayout()

        self.speed_slider = QSlider(Qt.Horizontal)
        self.speed_slider.setRange(0, 500)
        self.speed_slider.setValue(300)
        self.speed_slider.valueChanged.connect(self.update_speed_label)

        self.speed_label = QLabel("速度: 300")
        self.speed_label.setAlignment(Qt.AlignCenter)

        self.set_speed_button = QPushButton("设置速度")
        self.set_speed_button.clicked.connect(self.set_motor_speed)

        speed_layout.addWidget(self.speed_slider)
        speed_layout.addWidget(self.speed_label)
        speed_layout.addWidget(self.set_speed_button)
        speed_group.setLayout(speed_layout)

        # Connect button
        self.connect_button = QPushButton("连接电机")
        self.connect_button.clicked.connect(self.toggle_connection)
        self.connect_button.setFixedHeight(40)

        # Assemble the main layout
        main_layout.addWidget(status_group)
        main_layout.addWidget(control_group)
        main_layout.addWidget(speed_group)
        main_layout.addWidget(self.connect_button)

        self.setLayout(main_layout)
        self.set_controls_enabled(False)

    def toggle_connection(self):
        """Connect to or disconnect from the motor controller, updating the UI."""
        if self.connect_button.text() == "连接电机":
            try:
                # NOTE(review): serial port is hard-coded — confirm it matches
                # the deployment machine.
                self.motor = MotorController(port='/dev/ttyCH343USB0')
                self.status_label.setText("已连接")
                self.status_label.setStyleSheet("color: green; font-size: 14px; font-weight: bold;")
                self.connect_button.setText("断开连接")
                self.set_controls_enabled(True)
                QMessageBox.information(self, "连接成功", "已成功连接到电机控制器")
            except Exception as e:
                QMessageBox.critical(self, "连接失败", f"无法连接到电机控制器: {str(e)}")
                self.status_label.setText("连接失败")
                self.status_label.setStyleSheet("color: red; font-size: 14px; font-weight: bold;")
        else:
            if self.motor:
                try:
                    self.motor.close()
                except Exception:
                    # Best-effort close on disconnect; ignore failures.
                    pass
                self.motor = None
            self.status_label.setText("未连接")
            self.status_label.setStyleSheet("color: gray; font-size: 14px; font-weight: bold;")
            self.connect_button.setText("连接电机")
            self.set_controls_enabled(False)
            QMessageBox.information(self, "断开连接", "已断开与电机控制器的连接")

    def set_controls_enabled(self, enabled):
        """Enable or disable every motor-control widget at once."""
        self.start_button.setEnabled(enabled)
        self.stop_button.setEnabled(enabled)
        self.forward_button.setEnabled(enabled)
        self.reverse_button.setEnabled(enabled)
        self.emergency_button.setEnabled(enabled)
        self.speed_slider.setEnabled(enabled)
        self.set_speed_button.setEnabled(enabled)

    def update_speed_label(self, value):
        """Mirror the slider position into the speed label."""
        self.speed_label.setText(f"速度: {value}")

    def _run_motor_command(self, command, status_text, success_msg, error_prefix):
        """Run a zero-argument motor command and report the outcome.

        Does nothing when no motor is connected. On success, sets the status
        label to *status_text* and shows *success_msg*; on exception, shows a
        critical box whose text starts with *error_prefix*.
        """
        if not self.motor:
            return
        try:
            command()
            self.status_label.setText(status_text)
            QMessageBox.information(self, "成功", success_msg)
        except Exception as e:
            QMessageBox.critical(self, "错误", f"{error_prefix}: {str(e)}")

    def start_motor(self):
        """Start the motor."""
        self._run_motor_command(lambda: self.motor.start(),
                                "电机已启动", "电机已启动", "启动电机失败")

    def stop_motor(self):
        """Stop the motor."""
        self._run_motor_command(lambda: self.motor.stop(),
                                "电机已停止", "电机已停止", "停止电机失败")

    def forward_motor(self):
        """Run the motor forward."""
        self._run_motor_command(lambda: self.motor.forward(),
                                "电机正转中", "电机正在正转", "设置正转失败")

    def reverse_motor(self):
        """Run the motor in reverse."""
        self._run_motor_command(lambda: self.motor.reverse(),
                                "电机反转中", "电机正在反转", "设置反转失败")

    def emergency_stop(self):
        """Trigger the motor's emergency stop."""
        self._run_motor_command(lambda: self.motor.emergency_stop(),
                                "电机已急停", "电机已执行急停", "执行急停失败")

    def set_motor_speed(self):
        """Send the slider's current value to the motor as the target speed."""
        if not self.motor:
            return
        speed = self.speed_slider.value()
        self._run_motor_command(lambda: self.motor.set_speed(speed),
                                f"转速已设置为: {speed}",
                                f"电机转速已设置为: {speed}",
                                "设置转速失败")

    def closeEvent(self, event):
        """Best-effort close of the motor connection when the dialog closes."""
        if self.motor:
            try:
                self.motor.close()
            except Exception:
                pass
        event.accept()

class RobotControlUI(QMainWindow):
    """机器人控制主界面"""

    def __init__(self):
        """Set up the main window and initialize all runtime state before building the UI."""
        super().__init__()
        
        # ===== Window setup =====
        self.setWindowTitle("机器人控制系统")      # Window title
        self.setGeometry(100, 100, 1200, 800)    # Initial position and size
        self.setMinimumSize(800, 600)             # Minimum window size
        self.setWindowIcon(QIcon("Icon.ico"))  # Window icon
        
        # ===== Robot connection and state =====
        self.connection = None                     # Robot connection object
        self.mc = None                             # Robot control object
        # NOTE(review): called before init_ui() — verify update_ui_state does
        # not touch widgets that are only created later in init_ui.
        self.update_ui_state(False)                # Initialize UI state
        
        # ===== Teach-point management =====
        self.teach_points = []                     # Stored teach points
        self.current_point = None                  # Currently selected teach point
        self.last_executed_point = None            # Last executed teach point
        self.load_teach_points()                   # Load saved teach points
        
        # ===== Execution control =====
        self.execution_paused = False              # Execution paused flag
        self.execution_stopped = False             # Execution stopped flag
        self.execution_thread = None               # Execution thread
        self.execution_progress = 0                # Execution progress
        self.verification_enabled = True          # Position-verification enabled flag
        self.angle_tolerance = 3.0                 # Angle tolerance (degrees)
        self.coord_tolerance = 5.0                 # Coordinate tolerance (mm)
        
        # ===== Detection and grinding parameters =====
        self.detection_thread = None               # Object-detection thread
        self.detection_active = False              # Detection active flag
        self.grinding_loops = 1                    # Number of grinding loops
        self.grinding_x_step = 0.0                 # X-direction feed depth (mm)
        self.grinding_y_step = 0.0                 # Y-direction feed depth (mm)
        self.grinding_z_step = 0.0                 # Z-direction feed depth (mm)
        self.path_scale_factor_X = 1.0             # Path scale factor X
        self.path_scale_factor_Y = 1.0             # Path scale factor Y
        self.grinding_current_loop = 0             # Current grinding loop index
        
        # ===== Camera and model =====
        self.camera_type = "local"                 # Camera type ("local"/"network")
        self.onnx_model_path = ""                  # ONNX model path
        self.init_ui()                             # Build the user interface
        
        # ===== User offsets =====
        # NOTE(review): these are assigned after init_ui(); confirm nothing in
        # init_ui reads them during construction.
        self.user_offset_x = 0                     # User-defined X offset
        self.user_offset_y = 0                     # User-defined Y offset
        self.user_offset_z = 0                     # User-defined Z offset
        
        # ===== Audio and speech recognition =====
        self.audio_thread = None                   # Audio-processing thread
        self.speech_recognition_active = False     # Speech-recognition active flag
        
        # ===== Other components =====
        self.motor_dialog = MotorControlDialog()   # Motor-control dialog
        self.motor_controller = MotorController()   # Motor-controller object

    def init_ui(self):
        """初始化用户界面"""
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        main_layout = QVBoxLayout(central_widget)
        main_layout.setSpacing(5)
        main_layout.setContentsMargins(5, 5, 5, 5)

        # 状态栏
        self.status_bar = self.statusBar()
        self.status_label = QLabel("系统就绪")
        self.status_bar.addWidget(self.status_label)

        self.audio_status_label = QLabel("麦克风: 未连接")
        self.status_bar.addWidget(self.audio_status_label)

        # === 主布局 ===
        main_splitter = QSplitter(Qt.Horizontal)

        # === 左侧区域：摄像头画面 ===
        camera_container = QWidget()
        camera_layout = QVBoxLayout(camera_container)

        detection_group = QGroupBox("摄像头画面")
        detection_layout = QVBoxLayout()

        self.detection_label = QLabel("摄像头未启动")
        self.detection_label.setAlignment(Qt.AlignCenter)
        self.detection_label.setMinimumSize(640, 480)
        self.detection_label.setStyleSheet("""
            QLabel {
                background-color: #3D3D40;
                border: 1px solid #007ACC;
                border-radius: 4px;
                min-height: 480px;
            }
        """)


        # 摄像头控制按钮
        cam_control_layout = QHBoxLayout()
        self.detect_button = QPushButton("启动检测")
        self.detect_button.setFixedHeight(40)
        self.detect_button.clicked.connect(self.toggle_detection)

        self.detect_image_btn = QPushButton("检测图像")
        self.detect_image_btn.setFixedHeight(40)
        self.detect_image_btn.clicked.connect(self.detect_image)

        cam_control_layout.addWidget(self.detect_button)
        cam_control_layout.addWidget(self.detect_image_btn)
        detection_layout.addLayout(cam_control_layout)

        detection_layout.addLayout(cam_control_layout)
        detection_group.setLayout(detection_layout)
        camera_layout.addWidget(detection_group)
        main_splitter.addWidget(camera_container)

        # === 中间区域：示教点管理 + 打磨控制 + 执行控制 ===
        center_container = QWidget()
        center_layout = QVBoxLayout(center_container)
        center_layout.setContentsMargins(0, 0, 0, 0)

        top_horizontal_layout = QHBoxLayout()
        top_horizontal_layout.setSpacing(15)

        # 示教点管理
        teach_group = QGroupBox("示教点管理")
        teach_layout = QVBoxLayout()

        # 保存示教点区域
        save_teach_layout = QHBoxLayout()
        self.teach_name_input = QLineEdit()
        self.teach_name_input.setPlaceholderText("输入示教点名称")
        self.save_teach_button = QPushButton("保存当前位置")
        self.save_teach_button.clicked.connect(self.save_teach_point)
        self.save_teach_button.setEnabled(False)
        save_teach_layout.addWidget(self.teach_name_input)
        save_teach_layout.addWidget(self.save_teach_button)
        teach_layout.addLayout(save_teach_layout)

        # 示教点列表
        self.teach_point_list = QListWidget()
        self.teach_point_list.setSelectionMode(QAbstractItemView.SingleSelection)
        self.teach_point_list.itemDoubleClicked.connect(self.move_to_teach_point)
        self.update_teach_point_list()
        teach_layout.addWidget(self.teach_point_list)

        # 操作按钮
        button_layout = QHBoxLayout()
        self.move_button = QPushButton("移动到选定点")
        self.move_button.clicked.connect(self.move_to_selected_point)
        self.move_button.setEnabled(False)

        self.delete_button = QPushButton("删除选定点")
        self.delete_button.clicked.connect(self.delete_selected_point)

        button_layout.addWidget(self.move_button)
        button_layout.addWidget(self.delete_button)
        teach_layout.addLayout(button_layout)

        teach_group.setLayout(teach_layout)
        top_horizontal_layout.addWidget(teach_group, 3)  # 占60%宽度

        # 打磨控制
        control_group = QGroupBox("打磨控制")
        control_layout = QVBoxLayout()

        # 坐标偏移设置
        offset_group = QGroupBox("坐标偏移设置")
        offset_layout = QGridLayout()

        # X偏移
        offset_layout.addWidget(QLabel("X偏移(mm):"), 0, 0)
        self.offset_x_input = QLineEdit("0")
        self.offset_x_input.setPlaceholderText("X偏移")
        self.offset_x_input.setValidator(QDoubleValidator(-50, 50, 2))
        offset_layout.addWidget(self.offset_x_input, 0, 1)

        # Y偏移
        offset_layout.addWidget(QLabel("Y偏移(mm):"), 1, 0)
        self.offset_y_input = QLineEdit("0")
        self.offset_y_input.setPlaceholderText("Y偏移")
        self.offset_y_input.setValidator(QDoubleValidator(-50, 50, 2))
        offset_layout.addWidget(self.offset_y_input, 1, 1)

        # Z偏移
        offset_layout.addWidget(QLabel("Z偏移(mm):"), 2, 0)
        self.offset_z_input = QLineEdit("0")
        self.offset_z_input.setPlaceholderText("Z偏移")
        self.offset_z_input.setValidator(QDoubleValidator(-50, 50, 2))
        offset_layout.addWidget(self.offset_z_input, 2, 1)

        # 应用偏移按钮
        self.apply_offset_btn = QPushButton("应用偏移")
        self.apply_offset_btn.setFixedHeight(35)
        self.apply_offset_btn.clicked.connect(self.apply_offsets)
        offset_layout.addWidget(self.apply_offset_btn, 3, 0, 1, 2)

        offset_group.setLayout(offset_layout)
        control_layout.addWidget(offset_group)

        # 打磨参数
        grinding_control_group = QGroupBox("打磨参数")
        grinding_control_layout = QGridLayout()

        # 循环次数
        grinding_control_layout.addWidget(QLabel("循环次数:"), 0, 0)
        self.loop_count_input = QLineEdit("1")
        self.loop_count_input.setValidator(QIntValidator(1, 100))
        grinding_control_layout.addWidget(self.loop_count_input, 0, 1)

        # X方向进深
        grinding_control_layout.addWidget(QLabel("X进深(mm):"), 1, 0)
        self.x_step_input = QLineEdit("0.0")
        self.x_step_input.setValidator(QDoubleValidator(-10.0, 10.0, 2))
        grinding_control_layout.addWidget(self.x_step_input, 1, 1)

        # Y方向进深
        grinding_control_layout.addWidget(QLabel("Y进深(mm):"), 2, 0)
        self.y_step_input = QLineEdit("0.0")
        self.y_step_input.setValidator(QDoubleValidator(-10.0, 10.0, 2))
        grinding_control_layout.addWidget(self.y_step_input, 2, 1)

        # Z方向进深
        grinding_control_layout.addWidget(QLabel("Z进深(mm):"), 3, 0)
        self.z_step_input = QLineEdit("0.0")
        self.z_step_input.setValidator(QDoubleValidator(-10.0, 10.0, 2))
        grinding_control_layout.addWidget(self.z_step_input, 3, 1)

        grinding_control_layout.addWidget(QLabel("路径缩放比例X:"), 4, 0)
        self.scale_factor_X_input = QLineEdit("1.0")
        self.scale_factor_X_input.setValidator(QDoubleValidator(0.1, 10.0, 10))
        grinding_control_layout.addWidget(self.scale_factor_X_input, 4, 1)

        grinding_control_layout.addWidget(QLabel("路径缩放比例Y:"), 5, 0)
        self.scale_factor_Y_input = QLineEdit("1.0")
        self.scale_factor_Y_input.setValidator(QDoubleValidator(0.1, 10.0, 10))
        grinding_control_layout.addWidget(self.scale_factor_Y_input, 5, 1)

        # 应用参数按钮
        self.apply_grinding_params_btn = QPushButton("应用参数")
        self.apply_grinding_params_btn.setFixedHeight(35)
        self.apply_grinding_params_btn.clicked.connect(self.apply_grinding_params)
        grinding_control_layout.addWidget(self.apply_grinding_params_btn, 6, 0, 1, 2)

        grinding_control_group.setLayout(grinding_control_layout)
        control_layout.addWidget(grinding_control_group)

        # 打磨按钮
        self.grinding_button = QPushButton("启动打磨")
        self.grinding_button.setFixedHeight(50)
        self.grinding_button.setFont(QFont("Arial", 12, QFont.Bold))
        self.grinding_button.clicked.connect(self.toggle_grinding)

        # 状态指示灯
        grinding_status_layout = QHBoxLayout()
        grinding_status_label = QLabel("打磨状态:")
        self.grinding_status_indicator = QLabel()
        self.grinding_status_indicator.setFixedSize(20, 20)
        self.grinding_status_indicator.setStyleSheet("background-color: #505050; border-radius: 10px;")

        grinding_status_layout.addWidget(grinding_status_label)
        grinding_status_layout.addWidget(self.grinding_status_indicator)
        grinding_status_layout.addStretch()

        # 打磨进度标签
        self.grinding_progress_label = QLabel("打磨: 未运行")
        grinding_status_layout.addWidget(self.grinding_progress_label)

        control_layout.addLayout(grinding_status_layout)
        control_layout.addWidget(self.grinding_button)
        control_group.setLayout(control_layout)
        top_horizontal_layout.addWidget(control_group, 2) 

        # 将水平布局添加到中心布局
        center_layout.addLayout(top_horizontal_layout, 1) 

        # 执行控制
        execution_group = QGroupBox("执行控制")
        execution_layout = QVBoxLayout()

        # XYZ移动控制
        xyz_group = QGroupBox("XYZ坐标移动")
        xyz_layout = QGridLayout()

        xyz_layout.addWidget(QLabel("目标坐标:"), 0, 0)
        self.target_x_input = QLineEdit()
        self.target_x_input.setPlaceholderText("X")
        xyz_layout.addWidget(self.target_x_input, 0, 1)

        self.target_y_input = QLineEdit()
        self.target_y_input.setPlaceholderText("Y")
        xyz_layout.addWidget(self.target_y_input, 0, 2)

        self.target_z_input = QLineEdit()
        self.target_z_input.setPlaceholderText("Z")
        xyz_layout.addWidget(self.target_z_input, 0, 3)

        self.move_xyz_button = QPushButton("移动")
        self.move_xyz_button.setFixedHeight(35)
        self.move_xyz_button.clicked.connect(self.move_to_xyz)
        xyz_layout.addWidget(self.move_xyz_button, 0, 4)

        # 角度修正复选框
        self.angle_correction_checkbox = QCheckBox("启用角度修正")
        self.angle_correction_checkbox.setChecked(True)
        xyz_layout.addWidget(self.angle_correction_checkbox, 1, 0, 1, 3)

        xyz_group.setLayout(xyz_layout)
        execution_layout.addWidget(xyz_group)

        # 执行选项
        options_layout = QHBoxLayout()
        self.move_type_combo = QComboBox()
        self.move_type_combo.addItems(["关节运动 (MOVEJ)", "直线运动 (MOVEL)"])
        self.speed_slider = QSlider(Qt.Horizontal)
        self.speed_slider.setRange(1, 100)
        self.speed_slider.setValue(50)
        self.speed_label = QLabel("速度: 50")

        options_layout.addWidget(QLabel("运动类型:"))
        options_layout.addWidget(self.move_type_combo)
        options_layout.addWidget(QLabel("速度:"))
        options_layout.addWidget(self.speed_slider)
        options_layout.addWidget(self.speed_label)
        execution_layout.addLayout(options_layout)

        # 执行按钮
        execute_layout = QHBoxLayout()
        self.execute_all_button = QPushButton("执行所有点")
        self.execute_all_button.clicked.connect(self.execute_all_points)
        self.execute_selected_button = QPushButton("执行选定点")
        self.execute_selected_button.clicked.connect(self.execute_selected_point)

        execute_layout.addWidget(self.execute_all_button)
        execute_layout.addWidget(self.execute_selected_button)
        execution_layout.addLayout(execute_layout)

        # 控制按钮
        control_layout = QHBoxLayout()
        self.pause_button = QPushButton("暂停")
        self.pause_button.clicked.connect(self.pause_execution)
        self.resume_button = QPushButton("恢复")
        self.resume_button.clicked.connect(self.resume_execution)
        self.stop_button = QPushButton("停止")
        self.stop_button.clicked.connect(self.stop_execution)

        control_layout.addWidget(self.pause_button)
        control_layout.addWidget(self.resume_button)
        control_layout.addWidget(self.stop_button)
        execution_layout.addLayout(control_layout)

        # 进度条
        self.progress_bar = QProgressBar()
        self.progress_bar.setRange(0, 100)
        self.progress_bar.setValue(0)
        execution_layout.addWidget(self.progress_bar)

        execution_group.setLayout(execution_layout)
        center_layout.addWidget(execution_group, 1) 

        main_splitter.addWidget(center_container)

        # === 右侧区域：连接设置和控制按钮 ===
        right_container = QWidget()
        right_layout = QVBoxLayout(right_container)

        # 网络摄像头设置
        camera_setting_group = QGroupBox("摄像头设置")
        camera_setting_layout = QVBoxLayout()

        camera_type_layout = QHBoxLayout()
        camera_type_layout.addWidget(QLabel("摄像头类型:"))
        self.camera_type_combo = QComboBox()
        self.camera_type_combo.addItems(["本地摄像头", "网络摄像头"])
        self.camera_type_combo.currentIndexChanged.connect(self.change_camera_type)
        camera_type_layout.addWidget(self.camera_type_combo)

        camera_setting_layout.addLayout(camera_type_layout)

        detection_layout.addWidget(self.detection_label)
        camera_status_layout = QHBoxLayout()
        self.camera_status_label = QLabel("摄像头: 关闭")
        self.camera_status_label.setAlignment(Qt.AlignCenter)
        camera_status_layout.addWidget(self.camera_status_label)
        camera_setting_layout.addLayout(camera_status_layout)

        camera_control_layout = QHBoxLayout()
        self.open_camera_btn = QPushButton("打开摄像头")
        self.open_camera_btn.setFixedHeight(40)
        self.open_camera_btn.clicked.connect(self.toggle_camera)
        camera_control_layout.addWidget(self.open_camera_btn)

        self.close_camera_btn = QPushButton("关闭摄像头")
        self.close_camera_btn.setFixedHeight(40)
        self.close_camera_btn.clicked.connect(self.close_camera)
        camera_control_layout.addWidget(self.close_camera_btn)
        camera_setting_layout.addLayout(camera_control_layout)

        ip_layout = QHBoxLayout()
        ip_layout.addWidget(QLabel("IP地址:"))
        self.camera_ip_input = QLineEdit("0.0.0.0")
        ip_layout.addWidget(self.camera_ip_input)

        camera_setting_layout.addLayout(ip_layout)

        port_layout = QHBoxLayout()
        port_layout.addWidget(QLabel("端口:"))
        self.camera_port_input = QLineEdit("9999")
        port_layout.addWidget(self.camera_port_input)

        camera_setting_layout.addLayout(port_layout)

        model_layout = QHBoxLayout()
        model_layout.addWidget(QLabel("ONNX模型:"))
        self.onnx_model_path_input = QLineEdit()
        self.onnx_model_path_input.setPlaceholderText("选择ONNX模型文件")
        model_layout.addWidget(self.onnx_model_path_input)

        self.load_model_btn = QPushButton("加载")
        self.load_model_btn.clicked.connect(self.load_onnx_model)
        model_layout.addWidget(self.load_model_btn)

        camera_setting_layout.addLayout(model_layout)

        # 标定按钮
        calibration_layout = QHBoxLayout()
        self.calibrate_button = QPushButton("开始标定")
        self.calibrate_button.setFixedHeight(40)
        self.calibrate_button.clicked.connect(self.start_calibration)
        calibration_layout.addWidget(self.calibrate_button)

        # 单张图像标定按钮
        self.single_image_calibrate_btn = QPushButton("使用单张图像标定")
        self.single_image_calibrate_btn.setFixedHeight(40)
        self.single_image_calibrate_btn.clicked.connect(self.calibrate_single_image)
        calibration_layout.addWidget(self.single_image_calibrate_btn)

        camera_setting_layout.addLayout(calibration_layout)

        # 标定文件操作按钮
        calibration_file_layout = QHBoxLayout()
        self.load_calibration_btn = QPushButton("加载标定文件")
        self.load_calibration_btn.setFixedHeight(40)
        self.load_calibration_btn.clicked.connect(self.load_calibration_file)

        self.save_calibration_btn = QPushButton("保存标定文件")
        self.save_calibration_btn.setFixedHeight(40)
        self.save_calibration_btn.clicked.connect(self.save_calibration_file)

        calibration_file_layout.addWidget(self.load_calibration_btn)
        calibration_file_layout.addWidget(self.save_calibration_btn)
        camera_setting_layout.addLayout(calibration_file_layout)

        camera_setting_group.setLayout(camera_setting_layout)
        right_layout.addWidget(camera_setting_group)

        # 机器人连接设置
        connection_group = QGroupBox("机器人连接")
        connection_layout = QVBoxLayout()

        ip_layout = QHBoxLayout()
        ip_layout.addWidget(QLabel("IP地址:"))
        self.ip_input = QLineEdit(ROBOT_IP)
        self.ip_input.setPlaceholderText("192.168.25.185")
        ip_layout.addWidget(self.ip_input)

        connection_layout.addLayout(ip_layout)

        port_layout = QHBoxLayout()
        port_layout.addWidget(QLabel("端口号:"))
        self.port_input = QLineEdit(str(ROBOT_PORT))
        self.port_input.setPlaceholderText("例如：8080")
        port_layout.addWidget(self.port_input)

        connection_layout.addLayout(port_layout)

        # 连接按钮
        self.connect_button = QPushButton("连接机器人")
        self.connect_button.setFixedHeight(40)
        self.connect_button.clicked.connect(self.toggle_connection)

        connection_layout.addWidget(self.connect_button)

        # 状态指示灯
        status_layout = QHBoxLayout()
        status_label = QLabel("机器人状态:")
        self.status_indicator = QLabel()
        self.status_indicator.setFixedSize(20, 20)
        self.status_indicator.setStyleSheet("background-color: #505050; border-radius: 10px;")

        status_layout.addWidget(status_label)
        status_layout.addWidget(self.status_indicator)
        status_layout.addStretch()

        connection_layout.addLayout(status_layout)

        # 手动控制按钮
        manual_control_btn = QPushButton("手动控制")
        manual_control_btn.setFixedHeight(40)
        manual_control_btn.clicked.connect(self.open_manual_control)

        connection_layout.addWidget(manual_control_btn)

        # 电机控制按钮
        motor_control_btn = QPushButton("电机控制")
        motor_control_btn.setFixedHeight(40)
        motor_control_btn.clicked.connect(self.open_motor_control)

        connection_layout.addWidget(motor_control_btn)

        # 语音识别按钮
        self.speech_recognition_btn = QPushButton("启动语音识别")
        self.speech_recognition_btn.setFixedHeight(40)
        self.speech_recognition_btn.clicked.connect(self.toggle_speech_recognition)
        connection_layout.addWidget(self.speech_recognition_btn)

        connection_group.setLayout(connection_layout)
        right_layout.addWidget(connection_group)

        main_splitter.addWidget(right_container)

        # 设置分割比例
        main_splitter.setSizes([500, 400, 300])
        main_layout.addWidget(main_splitter)

        # 连接信号
        self.speed_slider.valueChanged.connect(self.update_speed_label)

        # 设置全局样式
        self.setStyleSheet("""
            QMainWindow {
                background-color: #2D2D30;
            }
            QGroupBox {
                background-color: #3D3D40;
                color: #FFFFFF;
                border: 1px solid #007ACC;
                border-radius: 6px;
                margin-top: 10px;
                padding-top: 15px;
                font-weight: bold;
            }
            QLabel {
                color: #CCCCCC;
            }
            QPushButton {
                background-color: #007ACC;
                color: white;
                border: none;
                border-radius: 4px;
                padding: 8px 16px;
                font-weight: bold;
            }
            QPushButton:hover {
                background-color: #1C97EA;
            }
            QPushButton:disabled {
                background-color: #505050;
                color: #A0A0A0;
            }
            QLineEdit {
                background-color: #3D3D40;
                border: 1px solid #007ACC;
                border-radius: 4px;
                padding: 5px;
                color: #FFFFFF;
            }
            QListWidget {
                background-color: #3D3D40;
                border: 1px solid #007ACC;
                border-radius: 4px;
                color: #FFFFFF;
            }
            QListWidget::item {
                padding: 8px;
                border-bottom: 1px solid #505050;
            }
            QListWidget::item:selected {
                background-color: #007ACC;
                color: white;
            }
            QProgressBar {
                border: 1px solid #007ACC;
                border-radius: 4px;
                text-align: center;
                background-color: #3D3D40;
            }
            QProgressBar::chunk {
                background-color: #007ACC;
            }
            QSplitter::handle {
                background-color: #505050;
            }
        """)

    def apply_grinding_params(self):
        """Validate and apply the grinding parameters from the input fields.

        All fields are validated first and the instance attributes are only
        committed once every value passes, so a bad later entry can no longer
        leave the parameters half-applied (the original assigned each value
        before validating the next one).
        """
        try:
            # Loop count must be in 1-100.
            loops = int(self.loop_count_input.text())
            if loops < 1 or loops > 100:
                QMessageBox.warning(self, "参数错误", "循环次数必须在1-100之间")
                return

            # Per-loop feed steps, each limited to +/-10 mm.
            x_step = float(self.x_step_input.text())
            if abs(x_step) > 10:
                QMessageBox.warning(self, "参数错误", "X进深不能超过±10mm")
                return

            y_step = float(self.y_step_input.text())
            if abs(y_step) > 10:
                QMessageBox.warning(self, "参数错误", "Y进深不能超过±10mm")
                return

            z_step = float(self.z_step_input.text())
            if abs(z_step) > 10:
                QMessageBox.warning(self, "参数错误", "Z进深不能超过±10mm")
                return

            # Path scale factors (dimensionless), each limited to 0.1-10.0.
            scale_factor_X = float(self.scale_factor_X_input.text())
            if scale_factor_X < 0.1 or scale_factor_X > 10.0:
                QMessageBox.warning(self, "参数错误", "缩放比例必须在0.1-10.0之间")
                return
            scale_factor_Y = float(self.scale_factor_Y_input.text())
            if scale_factor_Y < 0.1 or scale_factor_Y > 10.0:
                QMessageBox.warning(self, "参数错误", "缩放比例必须在0.1-10.0之间")
                return

            # All values validated -- commit atomically.
            self.grinding_loops = loops
            self.grinding_x_step = x_step
            self.grinding_y_step = y_step
            self.grinding_z_step = z_step
            self.path_scale_factor_X = scale_factor_X
            self.path_scale_factor_Y = scale_factor_Y

            # Original message fused the two scale lines together (missing
            # "\n") and labelled the dimensionless scale factors with "mm".
            QMessageBox.information(self, "参数设置",
                                    f"打磨参数已更新:\n循环次数: {self.grinding_loops}\n"
                                    f"X进深: {self.grinding_x_step}mm\n"
                                    f"Y进深: {self.grinding_y_step}mm\n"
                                    f"Z进深: {self.grinding_z_step}mm\n"
                                    f"X缩放比例: {scale_factor_X}\n"
                                    f"Y缩放比例: {scale_factor_Y}")
        except ValueError:
            QMessageBox.warning(self, "输入错误", "请输入有效的数字参数")

    def update_frame(self, frame):
        """Render a BGR OpenCV frame into the detection preview label."""
        # OpenCV delivers BGR; Qt wants RGB.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        height, width, channels = rgb.shape
        image = QImage(rgb.data, width, height, channels * width,
                       QImage.Format_RGB888)

        # Fit the pixmap to the label while preserving aspect ratio.
        scaled = QPixmap.fromImage(image).scaled(
            self.detection_label.width(),
            self.detection_label.height(),
            Qt.KeepAspectRatio,
            Qt.SmoothTransformation,
        )
        self.detection_label.setPixmap(scaled)

    def start_calibration(self):
        """Start camera calibration through the running detection thread."""
        # Make sure a detection thread exists before triggering calibration.
        if not self.detection_thread:
            self.create_detection_thread()

        if self.detection_thread and self.detection_thread.isRunning():
            try:
                # NOTE(review): the original called
                # `calibration_status.connect()` with no slot argument, which
                # raises TypeError and aborted every calibration attempt. No
                # status handler is visible in this file, so the broken
                # connect is removed; hook a slot up here if status updates
                # are needed.
                self.detection_thread.perform_calibration()
            except Exception as e:
                QMessageBox.critical(self, "标定错误", f"标定过程中发生错误: {str(e)}")
        else:
            QMessageBox.warning(self, "错误", "无法启动标定，请先确保摄像头检测已正常启动")

    def update_detection_result(self, result):
        """Slot for the detection thread's result signal; currently a no-op stub."""

    # 添加新的标定文件操作方法
    def load_calibration_file(self):
        """Prompt for a calibration JSON file and load it into the detection system."""
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择标定文件", "",
            "JSON文件 (*.json);;所有文件 (*)",
            options=QFileDialog.Options(),
        )
        if not file_path:
            return

        thread = self.detection_thread
        if thread and thread.detection_system:
            if thread.detection_system.load_calibration_params(file_path):
                QMessageBox.information(self, "加载成功", "标定参数已加载")
                # Reflect the loaded state in the UI.
                self.calibration_status.setText("标定状态: 已加载")
            else:
                QMessageBox.warning(self, "加载失败", "无法加载标定参数")

    def save_calibration_file(self):
        """Prompt for a destination and save the current calibration parameters."""
        file_path, _ = QFileDialog.getSaveFileName(
            self, "保存标定文件", "",
            "JSON文件 (*.json);;所有文件 (*)",
            options=QFileDialog.Options(),
        )
        if not file_path:
            return

        thread = self.detection_thread
        if thread and thread.detection_system:
            if thread.detection_system.save_calibration_params(file_path):
                QMessageBox.information(self, "保存成功", f"标定参数已保存到:\n{file_path}")
            else:
                QMessageBox.warning(self, "保存失败", "无法保存标定参数")

    def toggle_detection(self):
        """Start or stop the ONNX camera-detection thread.

        The branch is chosen from the button caption: "启动检测" starts a
        fresh detection thread, anything else stops the running one.
        """
        # A plain camera preview cannot share the device with detection.
        if hasattr(self, 'camera_thread') and self.camera_thread and self.camera_thread.isRunning():
            self.close_camera()
            time.sleep(0.5)

        if self.detect_button.text() == "启动检测":
            try:
                # Gather camera parameters from the settings widgets.
                camera_type = "network" if self.camera_type_combo.currentIndex() == 1 else "local"
                ip = self.camera_ip_input.text()
                port = int(self.camera_port_input.text())

                self.detection_thread = ONNXDetectionThread(
                    self.onnx_model_path,
                    ip=ip,
                    port=port
                )
                self.detection_thread.camera_type = camera_type

                # Wire the thread's signals to the UI handlers.
                self.detection_thread.update_frame.connect(self.update_frame)
                self.detection_thread.detection_result.connect(self.update_detection_result)
                self.detection_thread.detection_coords.connect(self.handle_detection_coords)

                self.detection_thread.start()
                self.detect_button.setText("停止检测")
                self.detect_button.setStyleSheet("background-color: #FF4D4D;")
            except Exception as e:
                QMessageBox.critical(self, "错误", f"无法启动目标检测: {str(e)}")
        else:
            # Stop the thread if it is still alive ...
            if self.detection_thread and self.detection_thread.isRunning():
                self.detection_thread.stop()
                self.detection_thread.wait(2000)  # wait for a clean exit
            self.detection_thread = None
            # ... but always reset the UI: the original only did so when
            # isRunning() was true, leaving the button stuck on "停止检测"
            # after a thread crash.
            self.detect_button.setText("启动检测")
            self.detect_button.setStyleSheet("")
            self.detection_label.clear()
            self.detection_label.setText("摄像头未启动")
            self.detection_result.setText("检测已停止")
            cv2.destroyAllWindows()

    def handle_single_image_result(self, result_frame, result_text):
        """Show a single-image detection result, or an error when detection failed."""
        if result_frame is None:
            # Detection failed: surface the message and reset the labels.
            QMessageBox.warning(self, "错误", result_text)
            self.detection_label.setText("检测失败")
            self.detection_result.setText(result_text)
            return

        # Success: paint the annotated frame and the textual summary.
        self.display_image(result_frame)
        self.detection_result.setText("检测结果: " + result_text)

    def display_image(self, frame):
        """Display a BGR OpenCV frame inside the detection QLabel."""
        converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        rows, cols, depth = converted.shape
        qt_image = QImage(converted.data, cols, rows, depth * cols,
                          QImage.Format_RGB888)

        # Scale to the label, keeping the image's proportions.
        pixmap = QPixmap.fromImage(qt_image).scaled(
            self.detection_label.width(),
            self.detection_label.height(),
            Qt.KeepAspectRatio,
            Qt.SmoothTransformation,
        )
        self.detection_label.setPixmap(pixmap)
    def load_yolo_model(self):
        """Pick a YOLO model file and restart detection so it takes effect."""
        model_path, _ = QFileDialog.getOpenFileName(
            self, "选择YOLO模型文件", "",
            "模型文件 (*.pt);;所有文件 (*)",
            options=QFileDialog.Options(),
        )
        if not model_path:
            return

        # Remember the chosen model at module level.
        global YOLO_MODEL_PATH
        YOLO_MODEL_PATH = model_path
        QMessageBox.information(self, "成功", f"已加载模型: {model_path}")

        # Tear down any running detection thread; the next start picks up
        # the new model.
        if self.detection_thread:
            self.detection_thread.stop()
            self.detection_thread = None
            self.detect_button.setText("启动检测")
            self.detect_button.setStyleSheet("")

    def camera_settings(self):
        """Inform the user that the default camera is in use (no settings UI yet)."""
        QMessageBox.information(self, "摄像头设置", "当前使用默认摄像头")

    def update_speed_label(self, value):
        """Reflect the speed slider's value in its caption label."""
        self.speed_label.setText(f"速度: {value}")

    def update_ui_state(self, connected):
        """Enable/disable robot-dependent controls and update the status lamp.

        Parameters:
            connected: True when a robot connection is active.
        """
        # All buttons that require a live robot connection.
        for button in (self.grinding_button,
                       self.save_teach_button,
                       self.move_button,
                       self.execute_all_button,
                       self.execute_selected_button):
            button.setEnabled(connected)

        if connected:
            self.connect_button.setText("断开连接")
            self.status_indicator.setStyleSheet("background-color: #00FF00; border-radius: 10px;")
        else:
            # NOTE(review): "链接机器人" differs from the initial button
            # caption "连接机器人" set at construction time; toggle_connection
            # compares against the exact text — confirm which spelling is
            # intended.
            self.connect_button.setText("链接机器人")
            self.status_indicator.setStyleSheet("background-color: #FF0000; border-radius: 10px;")

    def toggle_connection(self):
        """Connect to or disconnect from the robot arm.

        Branches on the actual connection state instead of the button text:
        the button is created with caption "连接机器人" but update_ui_state()
        resets it to "链接机器人", so the original text comparison sent the
        very first click down the disconnect path.
        """
        if not (self.connection and self.connection.is_connected()):
            # --- connect path ---
            ip = self.ip_input.text() or ROBOT_IP
            try:
                port = int(self.port_input.text() or ROBOT_PORT)
            except ValueError:
                # Non-numeric port used to raise an unhandled ValueError.
                QMessageBox.warning(self, "输入错误", "请填写IP地址和端口号！")
                return
            if not ip or not port:
                QMessageBox.warning(self, "输入错误", "请填写IP地址和端口号！")
                return

            # Audio server is only started once the inputs are valid
            # (originally it was started before validation).
            self.start_audio_server()

            print(f"正在连接到 {ip}:{port}...")
            try:
                self.connection = RobotConnection(ip, port)
                if self.connection.is_connected():
                    self.update_ui_state(True)
                else:
                    QMessageBox.critical(self, "连接失败", "无法连接到机械臂，请检查IP和端口")
            except Exception as e:
                QMessageBox.critical(self, "连接错误", f"连接过程中发生错误: {str(e)}")
        else:
            # --- disconnect path ---
            print("断开机器人连接...")
            if self.connection:
                self.connection.disconnect()
                self.connection.stop_heartbeat()
            self.stop_audio_server()
            self.update_ui_state(False)
            self.stop_speech_recognition()

    def open_manual_control(self):
        """Open the manual jog dialog; requires an active robot connection."""
        if self.connection and self.connection.is_connected():
            dialog = ManualControlDialog(self.connection, self)
            dialog.exec()
        else:
            QMessageBox.warning(self, "未连接", "请先连接机械臂")

    def open_motor_control(self):
        """Show the pre-built motor control dialog (non-modal)."""
        self.motor_dialog.show()

    def load_teach_points(self):
        """Load saved teach points from TEACH_POINTS_FILE into self.teach_points.

        Any failure (missing file, bad JSON) leaves an empty list.
        """
        self.teach_points = []
        try:
            if not os.path.exists(TEACH_POINTS_FILE):
                print("没有找到示教点文件，将创建新文件")
                return
            with open(TEACH_POINTS_FILE, 'r') as fh:
                self.teach_points = json.load(fh)
            print(f"成功加载 {len(self.teach_points)} 个示教点")
        except Exception as e:
            print(f"加载示教点失败: {str(e)}")
            self.teach_points = []

    def save_teach_points(self):
        """Persist self.teach_points to TEACH_POINTS_FILE.

        Returns:
            True on success, False when the write fails.
        """
        try:
            with open(TEACH_POINTS_FILE, 'w') as fh:
                json.dump(self.teach_points, fh, indent=4)
        except Exception as e:
            print(f"保存示教点失败: {str(e)}")
            return False
        print("示教点已保存到文件")
        return True

    def update_teach_point_list(self):
        """Rebuild the teach-point list widget from self.teach_points."""
        self.teach_point_list.clear()
        for point in self.teach_points:
            name = point.get('name', '未命名')

            # Prefer 'positions', fall back to 'angles' (legacy key).
            if 'positions' in point:
                values = point['positions']
            elif 'angles' in point:
                values = point['angles']
            else:
                values = None
            positions = (", ".join(f"{v:.2f}" for v in values)
                         if values is not None else "未知位置")

            time_str = point.get('timestamp', point.get('time', '未知时间'))
            self.teach_point_list.addItem(
                QListWidgetItem(f"{name} - {time_str}\n位置: [{positions}]"))

    def save_teach_point_object(self, point):
        """Append a teach-point dict after a duplicate-name check, then persist.

        Uses dict.get for the name comparison (consistent with
        save_teach_point) so existing points missing a 'name' key no longer
        raise KeyError.
        """
        name = point.get('name')
        if any(p.get('name') == name for p in self.teach_points):
            QMessageBox.warning(self, "名称重复", f"示教点名称 '{name}' 已存在，请使用不同的名称")
            return

        self.teach_points.append(point)
        if self.save_teach_points():
            self.update_teach_point_list()
            QMessageBox.information(self, "保存成功", f"成功保存示教点: {name}")
        else:
            # Roll back the in-memory append when the file write fails.
            self.teach_points.pop()
            QMessageBox.critical(self, "保存失败", "无法保存示教点到文件")

    def save_teach_point(self):
        """Capture the robot's current pose and store it as a named teach point."""
        # Guard clauses: connection, non-empty name, unique name.
        if not (self.connection and self.connection.is_connected()):
            QMessageBox.warning(self, "未连接", "未连接到机器人，无法保存示教点")
            return

        name = self.teach_name_input.text().strip()
        if not name:
            QMessageBox.warning(self, "输入错误", "请输入示教点名称")
            return

        if any(p.get('name') == name for p in self.teach_points):
            QMessageBox.warning(self, "名称重复", f"示教点名称 '{name}' 已存在，请使用不同的名称")
            return

        try:
            robot = self.connection.get_robot()
            coords = robot.get_coords()
            angles = robot.get_angles()

            if not angles:
                QMessageBox.warning(self, "获取位置失败", "无法获取机器人当前位置")
                return

            self.teach_points.append({
                'name': name,
                'coords': coords,
                'angles': angles,
                # Duplicate of 'angles', kept for readers of the legacy key.
                'positions': angles,
                'timestamp': time.strftime("%Y-%m-%d %H:%M:%S"),
            })

            if self.save_teach_points():
                self.update_teach_point_list()
                QMessageBox.information(self, "保存成功", f"成功保存示教点: {name}")
                self.teach_name_input.clear()
            else:
                # Keep memory and file consistent on write failure.
                self.teach_points.pop()
                QMessageBox.critical(self, "保存失败", "无法保存示教点到文件")
        except Exception as e:
            QMessageBox.critical(self, "保存错误", f"保存示教点时出错: {str(e)}")

    def get_selected_point(self):
        """Return the teach point matching the current list selection, or None."""
        selection = self.teach_point_list.selectedItems()
        if not selection:
            return None
        row = self.teach_point_list.row(selection[0])
        return self.teach_points[row] if 0 <= row < len(self.teach_points) else None

    def move_to_selected_point(self):
        """Move the robot to the teach point currently selected in the list."""
        target = self.get_selected_point()
        if target is not None:
            self.move_to_teach_point(target)

    def move_to_teach_point(self, point=None):
        """Drive the robot to *point*, or to the current list selection.

        Parameters:
            point: teach-point dict; when None the list selection is used.
        """
        if not (self.connection and self.connection.is_connected()):
            QMessageBox.warning(self, "未连接", "未连接到机器人，无法移动")
            return

        # Fall back to the selected row when no point was supplied.
        if point is None:
            selection = self.teach_point_list.selectedItems()
            if selection:
                row = self.teach_point_list.row(selection[0])
                if row < len(self.teach_points):
                    point = self.teach_points[row]

        if not point:
            return

        try:
            point_name = point.get('name', '未命名点位')
            print(f"移动到: {point_name}")
            robot = self.connection.get_robot()
            target = point.get('positions', point.get('angles', []))

            # Clamp to joint limits before commanding the move.
            safe_target = self.apply_joint_limits(target)
            robot.send_angles(safe_target, 50)

            # Highlight the matching row in the list widget.
            for row in range(self.teach_point_list.count()):
                if self.teach_points[row].get('name') == point.get('name'):
                    self.teach_point_list.item(row).setBackground(QBrush(QColor("#007ACC")))
                    break

            QMessageBox.information(self, "移动成功", f"正在移动到: {point_name}")
        except Exception as e:
            QMessageBox.critical(self, "移动失败", f"移动过程中出错: {str(e)}")

    def apply_joint_limits(self, angles):
        """Clamp each joint angle (degrees) into its safe range.

        Angles beyond the six known joints pass through unchanged.

        Parameters:
            angles: iterable of joint angles in degrees.
        Returns:
            list of angles, each clamped to its joint's limits.
        """
        limits = (
            (-168, 168),  # joint 1
            (-135, 135),  # joint 2
            (-150, 150),  # joint 3
            (-145, 145),  # joint 4
            (-165, 165),  # joint 5
            (-180, 180),  # joint 6
        )

        clamped = []
        for idx, value in enumerate(angles):
            if idx < len(limits):
                low, high = limits[idx]
                value = min(max(value, low), high)
            clamped.append(value)
        return clamped

    def delete_selected_point(self):
        """Delete the currently selected teach point after user confirmation."""
        point = self.get_selected_point()
        if not point:
            return

        # dict.get keeps this consistent with the rest of the file and avoids
        # KeyError on points that were saved without a 'name' field.
        name = point.get('name', '未命名')
        reply = QMessageBox.question(self, "确认删除",
                                     f"确定要删除示教点 '{name}' 吗?",
                                     QMessageBox.Yes | QMessageBox.No)
        if reply != QMessageBox.Yes:
            return

        self.teach_points = [p for p in self.teach_points if p.get('name') != name]
        if self.save_teach_points():
            self.update_teach_point_list()
            QMessageBox.information(self, "删除成功", f"已删除示教点: {name}")
        else:
            # Persisting failed: reload from disk so memory matches the file.
            self.load_teach_points()
            QMessageBox.critical(self, "删除失败", "无法保存更改")

    def toggle_grinding(self):
        """Toggle the grinding state shown by the grinding button caption.

        Fixes from the original: the docstring was a stray string statement
        mid-function, and the button/indicator were flipped to the "running"
        state before the no-contour early return, leaving the UI claiming
        grinding was active when nothing had started.
        """
        # Refuse to toggle when the connection object is missing/invalid.
        if not self.connection or not hasattr(self.connection, 'reconnect'):
            QMessageBox.warning(self, "连接错误", "机器人连接对象无效")
            return

        if self.grinding_button.text() == "启动打磨":
            print("启动打磨程序...")

            # Fetch the fixed contour captured by the detection system, if any.
            contour_points = []
            thread = getattr(self, 'detection_thread', None)
            system = getattr(thread, 'detection_system', None) if thread else None
            if system is not None and hasattr(system, 'fixed_contour'):
                contour_points = system.fixed_contour
                print(f"获取到固定轮廓，点数: {len(contour_points) if contour_points else 0}")

            if not contour_points:
                # Bail out BEFORE touching the UI so the button does not get
                # stuck in the "stop" state with nothing running.
                print("未找到轮廓点，使用默认矩形路径")
                return

            self.grinding_button.setText("停止打磨")
            self.grinding_button.setStyleSheet("background-color: #FF4D4D;")
            self.grinding_status_indicator.setStyleSheet("background-color: #00FF00; border-radius: 10px;")
        else:
            print("停止打磨程序...")
            self.grinding_button.setText("启动打磨")
            self.grinding_button.setStyleSheet("")
            self.grinding_status_indicator.setStyleSheet("background-color: #FF0000; border-radius: 10px;")

            # A grinding thread is running: ask the arm itself to stop.
            if hasattr(self, 'grinding_thread') and self.grinding_thread.is_alive():
                try:
                    self.connection.get_robot().stop()
                except Exception as e:
                    print(f"停止机械臂时出错: {e}")

    def _run_grinding_procedure(self, contour_points, user_offset_x, user_offset_y, user_offset_z=260):
        if not self.connection or not self.connection.is_connected():
            print("未连接到机器人，无法执行打磨")
            return
            
        robot = self.connection.get_robot()
        original_mode = None
        original_ref_frame = None
        
        # 配置速度参数
        APPROACH_SPEED = 40   # 毫米/秒 - 接近和抬升速度
        GRIND_SPEED = 5      # 毫米/秒 - 打磨速度（此参数可调整）
        JOINT_SPEED = 5      # 关节运动速度（度/秒）
        
        # 电机控制标志
        motor_started = False
        
        try:
            # 启动电机
            if hasattr(self, 'motor_controller') and self.motor_controller:
                try:
                    # 设置电机速度
                    speed = getattr(self, 'motor_max_speed', 400)
                    print(f"启动打磨电机，转速: {speed} RPM")
                    self.motor_controller.stop()
                    self.motor_controller.set_speed(speed)
                    self.motor_controller.forward()
                    motor_started = True
                    time.sleep(0.5)  # 等待电机加速
                except Exception as motor_err:
                    print(f"启动电机失败: {motor_err}")
            
            # 保存原始运动模式和参考坐标系
            original_mode = robot.get_fresh_mode()
            original_ref_frame = robot.get_reference_frame()
            
            # 配置优化运动模式
            robot.set_fresh_mode(1)       # 刷新模式
            robot.set_movement_type(1)    # 直线运动模式
            robot.set_end_type(0)         # 使用法兰坐标系
            robot.set_reference_frame(0)  # 使用基坐标系
            
            # 固定姿态参数
            fixed_rx, fixed_ry, fixed_rz = 179.87, -3.78, -179.75
            
            # 轮廓点检查
            if not contour_points or len(contour_points) < 3:
                print("轮廓点不足，无法执行打磨")
                return
                
            # 使用检测系统
            detection_system = self.detection_thread.detection_system
                
            # 保存原始偏移量
            original_offset_x = user_offset_x
            original_offset_y = user_offset_y
            original_offset_z = user_offset_z
            
            # 重置当前循环计数
            self.grinding_current_loop = 0
            
            # 循环执行打磨
            for loop in range(self.grinding_loops):
                self.grinding_current_loop = loop + 1
                
                # 更新状态栏
                self.grinding_progress_label.setText(f"打磨: {self.grinding_current_loop}/{self.grinding_loops}")
                
                print(f"====== 开始打磨循环 {self.grinding_current_loop}/{self.grinding_loops} ======")
                
                # 计算当前循环的偏移量
                current_x_offset = original_offset_x + (self.grinding_x_step * loop)
                current_y_offset = original_offset_y + (self.grinding_y_step * loop)
                current_z_offset = original_offset_z + (self.grinding_z_step * loop)
                
                print(f"当前偏移: X={current_x_offset:.2f}, Y={current_y_offset:.2f}, Z={current_z_offset:.2f}")
                
                # 在生成轮廓点后添加检查
                self.check_path_points(contour_points)  # 检查原始路径点

                # 应用缩放比例
                scaled_points = []
                for point in contour_points:
                    scaled_x = point[0] * self.path_scale_factor_Y
                    scaled_y = point[1] * self.path_scale_factor_X
                    scaled_points.append([scaled_x, scaled_y])

                self.check_path_points(scaled_points)  # 检查缩放后的路径点

                # 应用缩放后的坐标转换
                world_contour = []
                for point in scaled_points:
                    pixel_x, pixel_y = point
                    world_x, world_y, success = detection_system.pixel_to_world_coords(pixel_x, pixel_y)
                    if not success:
                        print(f"无法转换坐标 ({pixel_x}, {pixel_y})")
                        continue
                        
                    world_contour.append([
                        world_x + current_x_offset,
                        world_y + current_y_offset,
                        current_z_offset
                    ])
                
                    
                # 运动序列优化
                safe_angles = [0.79, 53.43, -129.81, -8.96, 2.02, 90.17]  # 关节角安全位置
                start_point = world_contour[0]
                
                # 移动到起始点上方
                approach_height = current_z_offset + 10
                approach_position = [start_point[0], start_point[1], approach_height, fixed_rx, fixed_ry, fixed_rz]
                robot.sync_send_coords(approach_position, APPROACH_SPEED, mode=1, timeout=8)
                
                # 下降到打磨高度
                grind_position = [start_point[0], start_point[1], current_z_offset, fixed_rx, fixed_ry, fixed_rz]
                robot.sync_send_coords(grind_position, GRIND_SPEED, mode=1, timeout=8)
                
                # 轨迹打磨优化
                print(f"开始轮廓打磨 | 点数: {len(world_contour)} | 打磨速度: {GRIND_SPEED} mm/s")
                
                # 创建完整的运动轨迹
                path_points = []
                for point in world_contour:
                    # 检查停止请求
                    if self.grinding_button.text() == "启动打磨":
                        print("收到停止请求，终止打磨")
                        return
                        
                    # 所有点使用相同的高度和姿态
                    path_points.append([point[0], point[1], point[2], fixed_rx, fixed_ry, fixed_rz])
                
                # 获取当前机械臂角度作为初始值
                current_angles = robot.get_angles()
                if not current_angles:
                    print("无法获取当前角度，使用安全角度作为初始值")
                    current_angles = safe_angles
                
                # 获取锁定第六轴角度
                lock_j6 = current_angles[5] if len(current_angles) >= 6 else 90.17
                print(f"锁定第六轴角度为: {lock_j6:.2f}°")
                
                # 创建关节角度路径点列表
                joint_path_points = []
                
                # 遍历所有路径点进行转换
                for i, coords in enumerate(path_points):
                    # 1. 将XYZ坐标转换为角度坐标
                    try:
                        angles = robot.solve_inv_kinematics(coords, current_angles)
                    except Exception as e:
                        print(f"无法求解逆运动学 (点 {i}): {e}")
                        # 使用上一个点的角度
                        if joint_path_points:
                            angles = joint_path_points[-1]
                        else:
                            angles = current_angles
                    
                    angles[5] = lock_j6  # 锁定第六轴角度
                    angles[3] = angles[3] + 13.5 #抬升第四轴角度
                    
                    # 保存调整后的角度
                    joint_path_points.append(angles)
                    
                    # 更新当前角度用于下一个点
                    current_angles = angles
                
                print(f"关节角度路径点生成完成，共 {len(joint_path_points)} 个点")
                
                # 切换到关节运动模式
                print("切换到关节运动模式")
                robot.set_movement_type(0)  # 关节运动模式
                
                # 按顺序发送所有关节角度点
                for i, angles in enumerate(joint_path_points):
                    # 每5个点或最后1个点同步发送一次确保速度生效
                    if i % 5 == 0 or i == len(joint_path_points) - 1:
                        robot.sync_send_angles(angles, JOINT_SPEED, timeout=0.5)
                    else:
                        # 使用关节运动模式
                        robot.send_angles(angles, JOINT_SPEED)
                        
                    # 在关键点添加短暂延迟
                    if i % 20 == 0:
                        time.sleep(0.01)
                
                # 确保路径完成 - 同步发送结束点
                end_angles = joint_path_points[-1].copy()
                robot.sync_send_angles(end_angles, JOINT_SPEED, timeout=1)
                
                # 切换回直线运动模式
                print("切换回直线运动模式")
                robot.set_movement_type(1)  # 直线运动模式
                
                # 动态等待运动完成
                total_points = len(joint_path_points)
                estimated_time = (total_points * 0.12) * (50 / max(JOINT_SPEED, 1))
                wait_time = min(max(3, estimated_time), 15)
                
                print(f"预计等待时间: {wait_time:.1f}秒")
                start_time = time.time()
                
                while robot.is_moving():
                    # 实时检测停止请求
                    if self.grinding_button.text() == "启动打磨":
                        print("收到停止请求，终止等待")
                        robot.stop()  # 发送停止命令
                        break
                        
                    if time.time() - start_time > wait_time:
                        print("运动超时检查，继续流程")
                        break
                        
                    time.sleep(0.05)  # 更短的等待时间
                
                # 抬升工具头
                lift_position = [
                    path_points[-1][0], 
                    path_points[-1][1], 
                    approach_height, 
                    fixed_rx, fixed_ry, fixed_rz
                ]
                robot.sync_send_coords(lift_position, APPROACH_SPEED, mode=1, timeout=5)
                
                # 循环结束后的处理
                print(f"====== 完成打磨循环 {self.grinding_current_loop}/{self.grinding_loops} ======")
                
                # 如果不是最后一圈，短暂暂停
                if loop < self.grinding_loops - 1:
                    print("准备下一圈打磨...")
                    time.sleep(1) 
            
            # 返回安全位置
            robot.sync_send_angles(safe_angles, APPROACH_SPEED, timeout=8)
            
            print("所有打磨循环完成")
        except Exception as e:
            print(f"打磨过程中发生错误: {str(e)}")
            import traceback
            traceback.print_exc()
        finally:
            # 恢复原始运动模式和参考坐标系
            if original_mode is not None:
                try:
                    robot.set_fresh_mode(original_mode)
                    robot.set_movement_type(0)  # 恢复关节运动模式
                except Exception as restore_err:
                    print(f"恢复原始模式错误: {restore_err}")
                    
            if original_ref_frame is not None:
                try:
                    robot.set_reference_frame(original_ref_frame)
                except Exception as restore_err:
                    print(f"恢复参考系错误: {restore_err}")
            
            # 停止电机
            if motor_started and hasattr(self, 'motor_controller') and self.motor_controller:
                try:
                    print("停止打磨电机...")
                    self.motor_controller.emergency_stop()
                except Exception as motor_err:
                    print(f"停止电机时出错: {motor_err}")
            
            # UI状态更新
            try:
                self.grinding_button.setText("启动打磨")
                self.grinding_button.setStyleSheet("")
                self.grinding_status_indicator.setStyleSheet("background-color: #FF0000; border-radius: 10px;")
                self.grinding_progress_label.setText("打磨: 完成")
            except Exception as ui_err:
                print(f"更新UI时出错: {ui_err}")
            print("打磨线程完成")

    def check_path_points(self, contour_points):
        """Sanity-check a generated contour path.

        Prints the point count, XY bounding box and the closed-loop
        perimeter (in pixels). Purely diagnostic; no return value.
        """
        if not contour_points:
            print("没有生成路径点")
            return

        print(f"=== 路径点检查 ===")
        print(f"原始点数: {len(contour_points)}")

        # Bounding box of the path
        xs = [pt[0] for pt in contour_points]
        ys = [pt[1] for pt in contour_points]
        min_x, max_x = min(xs), max(xs)
        min_y, max_y = min(ys), max(ys)

        print(f"X范围: {min_x:.2f} - {max_x:.2f}, 宽度: {max_x - min_x:.2f}")
        print(f"Y范围: {min_y:.2f} - {max_y:.2f}, 高度: {max_y - min_y:.2f}")

        # Closed-loop perimeter: the last point wraps back to the first
        perimeter = 0
        for idx, p1 in enumerate(contour_points):
            p2 = contour_points[(idx + 1) % len(contour_points)]
            perimeter += math.hypot(p2[0] - p1[0], p2[1] - p1[1])

        print(f"周长: {perimeter:.2f}像素")

    def execute_all_points(self):
        """Run every stored teach point using the UI-selected move type and speed."""
        if not self.teach_points:
            QMessageBox.warning(self, "无示教点", "没有可执行的示教点")
            return
        # Combo index 0 selects joint motion, anything else linear motion
        mode = "MOVEJ" if self.move_type_combo.currentIndex() == 0 else "MOVEL"
        self.execute_points(None, mode, self.speed_slider.value())

    def execute_selected_point(self):
        """Run only the teach point currently highlighted in the list widget."""
        selected = self.get_selected_point()
        if not selected:
            QMessageBox.warning(self, "未选择", "请先选择一个示教点")
            return
        mode = "MOVEJ" if self.move_type_combo.currentIndex() == 0 else "MOVEL"
        # Translate the point object back into its list index for execute_points
        self.execute_points(
            [self.teach_points.index(selected)], mode, self.speed_slider.value()
        )

    def execute_points(self, point_indices=None, move_type='MOVEJ', speed=50):
        """Start a daemon thread that executes teach points.

        point_indices: indices into self.teach_points, or None for all points.
        move_type: 'MOVEJ' (joint) or 'MOVEL' (linear).
        speed: motion speed forwarded to the worker.
        Returns True when the worker thread was started, False otherwise.
        """
        if not self.connection or not self.connection.is_connected():
            QMessageBox.warning(self, "未连接", "机械臂未连接，无法执行")
            return False
        if not self.teach_points:
            QMessageBox.warning(self, "无示教点", "没有可执行的示教点")
            return False

        # Resolve indices to concrete points, silently dropping out-of-range ones
        if point_indices is None:
            selected = self.teach_points
        else:
            selected = [
                self.teach_points[idx]
                for idx in point_indices
                if 0 <= idx < len(self.teach_points)
            ]

        if not selected:
            QMessageBox.warning(self, "无效索引", "没有有效的示教点索引")
            return False

        self.execution_thread = threading.Thread(
            target=self.execute_points_thread,
            args=(selected, move_type, speed),
            daemon=True,
        )
        self.execution_thread.start()
        print("执行线程已启动")

        # Poll the worker's progress into the progress bar every 500 ms
        self.progress_timer = QTimer()
        self.progress_timer.timeout.connect(self.update_progress)
        self.progress_timer.start(500)
        return True

    def execute_points_thread(self, points_to_execute, move_type, speed):
        """Worker-thread body that runs a list of teach points sequentially.

        points_to_execute: teach-point dicts holding 'coords' and/or
            'positions'/'angles'.
        move_type: 'MOVEL' sends Cartesian coords; anything else sends joint angles.
        speed: motion speed forwarded to the robot API.
        Returns True when the sequence completed, False on stop or fatal error.
        Communicates with the UI via self.execution_progress / _paused / _stopped.
        """
        try:
            mc = self.connection.get_robot()
            # Power up all joints; the API returns 1 on success
            if mc.focus_all_servos() != 1:
                print("警告：上电所有关节失败")

            print(f"开始执行 {len(points_to_execute)} 个点位 ({move_type}模式)...")
            self.execution_paused = False
            self.execution_stopped = False
            self.execution_progress = 0  # polled by update_progress via QTimer

            for i, point in enumerate(points_to_execute):
                print(f"\n执行点位 #{i + 1} ({point['name']})")
                self.execution_progress = int((i / len(points_to_execute)) * 100)

                if self.execution_stopped:
                    print("执行已被停止")
                    break

                # Busy-wait while paused; a stop request aborts the whole run
                while self.execution_paused:
                    print("执行暂停中...")
                    time.sleep(1)
                    if self.execution_stopped:
                        print("执行已被停止")
                        return False

                try:
                    target_position = None
                    if move_type == 'MOVEL':
                        # Cartesian move; mode=1 selects linear interpolation
                        target_position = point['coords']
                        result = mc.send_coords(target_position, speed, mode=1)
                        if result != 1:
                            print(f"发送坐标命令失败: {result}")
                            continue
                    else:
                        # Joint move; prefer 'positions', fall back to 'angles'
                        target_position = point.get('positions', point.get('angles', []))
                        result = mc.send_angles(target_position, speed)
                        if result != 1:
                            print(f"发送角度命令失败: {result}")
                            continue

                    # Wait for the motion to finish, with a 60 s safety timeout
                    start_time = time.time()
                    while True:
                        if not mc.is_moving():
                            break
                        if time.time() - start_time > 60:
                            print("警告：运动超时")
                            break
                        if self.execution_paused or self.execution_stopped:
                            mc.stop()
                            break
                        time.sleep(0.1)

                    # If paused mid-motion, wait here until resumed or stopped
                    if self.execution_paused:
                        print("运动已被暂停")
                        while self.execution_paused and not self.execution_stopped:
                            time.sleep(0.5)
                        if self.execution_stopped:
                            print("执行已被停止")
                            return False

                    # Optional closed-loop check that the pose was actually reached
                    if self.verification_enabled and target_position:
                        print("执行位置验证...")
                        if not self.verify_position(point, move_type):
                            print("警告：位置验证失败，可能需要重新执行")

                    self.last_executed_point = point
                    print(f"点位 #{i + 1} 执行完成")
                    time.sleep(0.5)
                except Exception as e:
                    print(f"执行点位 #{i + 1} 时出错: {str(e)}")
                    # Best effort: restore the link, then continue with the next point
                    if not self.connection.is_connected():
                        print("尝试重新连接...")
                        self.connection.reconnect()
                    continue

            print("\n点位执行完毕" if not self.execution_stopped else "\n执行已停止")
            self.execution_progress = 100
            return True
        except Exception as e:
            print(f"执行过程中发生严重错误: {str(e)}")
            return False

    def update_progress(self):
        """Push the worker thread's progress into the bar; stop polling at 100%."""
        value = self.execution_progress
        self.progress_bar.setValue(value)
        if value >= 100:
            self.progress_timer.stop()

    def verify_position(self, target_point, move_type):
        """Check that the robot's actual pose matches target_point.

        MOVEJ compares per-joint angles against self.angle_tolerance;
        any other move type compares the Euclidean XYZ distance against
        self.coord_tolerance. Returns True when within tolerance.
        """
        try:
            mc = self.connection.get_robot()
            actual_angles = mc.get_angles()
            actual_coords = mc.get_coords()

            if not actual_angles or not actual_coords:
                print("验证失败：无法获取当前位置")
                return False

            if move_type == 'MOVEJ':
                wanted = target_point.get('positions', target_point.get('angles', []))

                if len(actual_angles) != 6 or len(wanted) != 6:
                    print("验证失败：角度数据不完整")
                    return False

                # Worst-case joint deviation decides pass/fail
                max_error = max(abs(a - b) for a, b in zip(actual_angles, wanted))

                if max_error > self.angle_tolerance:
                    print(f"验证失败：最大角度误差 {max_error:.2f}° > 容差 {self.angle_tolerance}°")
                    return False

                print(f"验证通过：最大角度误差 {max_error:.2f}°")
                return True

            # Cartesian check: straight-line distance between XYZ triples
            actual_pos = actual_coords[:3]
            wanted_pos = target_point.get('coords', [])[:3]

            if len(actual_pos) != 3 or len(wanted_pos) != 3:
                print("验证失败：坐标数据不完整")
                return False

            error = sum((a - b) ** 2 for a, b in zip(actual_pos, wanted_pos)) ** 0.5

            if error > self.coord_tolerance:
                print(f"验证失败：位置误差 {error:.2f}mm > 容差 {self.coord_tolerance}mm")
                return False

            print(f"验证通过：位置误差 {error:.2f}mm")
            return True
        except Exception as e:
            print(f"验证位置发生错误: {str(e)}")
            return False

    def pause_execution(self):
        """Pause the running execution thread; returns True on success."""
        worker = self.execution_thread
        if not worker or not worker.is_alive():
            QMessageBox.warning(self, "无执行", "没有正在执行的线程")
            return False

        try:
            result = self.connection.get_robot().pause()
            if result != 1:
                print(f"暂停命令失败: {result}")
            # The worker loop polls this flag between points
            self.execution_paused = True
            print("执行已暂停")
            return True
        except Exception as e:
            QMessageBox.critical(self, "暂停失败", f"暂停执行失败: {str(e)}")
            return False

    def resume_execution(self):
        """Resume a paused execution thread; returns True on success."""
        worker = self.execution_thread
        if not worker or not worker.is_alive():
            QMessageBox.warning(self, "无执行", "没有正在执行的线程")
            return False

        try:
            result = self.connection.get_robot().resume()
            if result != 1:
                print(f"恢复命令失败: {result}")
            # Clearing the flag lets the worker loop continue
            self.execution_paused = False
            print("执行已恢复")
            return True
        except Exception as e:
            QMessageBox.critical(self, "恢复失败", f"恢复执行失败: {str(e)}")
            return False

    def stop_execution(self):
        """Abort the running execution thread; returns True on success."""
        worker = self.execution_thread
        if not worker or not worker.is_alive():
            QMessageBox.warning(self, "无执行", "没有正在执行的线程")
            return False

        try:
            result = self.connection.get_robot().stop()
            if result != 1:
                print(f"停止命令失败: {result}")
            # Stop overrides pause so a paused worker can exit immediately
            self.execution_paused = False
            self.execution_stopped = True
            print("执行已停止")
            return True
        except Exception as e:
            QMessageBox.critical(self, "停止失败", f"停止执行失败: {str(e)}")
            return False

    def closeEvent(self, event):
        """Shut down worker threads and connections when the window closes.

        Fixes two teardown defects: detection_thread was dereferenced
        without the existence guard the camera thread gets, and the audio
        server / speech recognition were only stopped when a robot
        connection existed, leaking the audio thread otherwise.
        """
        # Stop the camera feed first so no more frames arrive during teardown
        if hasattr(self, 'camera_thread') and self.camera_thread and self.camera_thread.isRunning():
            self.camera_thread.stop()
            self.camera_thread = None

        # Stop the detection thread (it may never have been created)
        if getattr(self, 'detection_thread', None):
            self.detection_thread.stop()
            self.detection_thread = None

        # Audio capture and speech recognition run independently of the robot
        # link, so stop them unconditionally
        self.stop_audio_server()
        self.stop_speech_recognition()

        if self.connection:
            self.connection.disconnect()
            self.connection.stop_heartbeat()

        event.accept()

    def change_camera_type(self, index):
        """Record the camera source chosen in the combo box (0=local, 1=network)."""
        self.camera_type = "local" if index != 1 else "network"
        print(f"切换摄像头类型为: {self.camera_type}")

    def load_onnx_model(self):
        """Browse for an ONNX model file and remember its path in the UI."""
        model_path, _ = QFileDialog.getOpenFileName(
            self, "选择ONNX模型文件", "",
            "ONNX模型 (*.onnx);;所有文件 (*)",
            options=QFileDialog.Options()
        )

        # Dialog cancelled
        if not model_path:
            return

        # Guard against a stale or removed file
        if not os.path.exists(model_path):
            QMessageBox.warning(self, "错误", "模型文件不存在")
            return

        self.onnx_model_path = model_path
        self.onnx_model_path_input.setText(model_path)
        QMessageBox.information(self, "模型加载", f"模型加载成功: {os.path.basename(model_path)}")

    def toggle_detection(self):
        """Start or stop the ONNX object-detection thread (button is a toggle).

        Fix: the network-camera branch read the IP/port fields but never
        passed them to the thread; they are now forwarded to the
        ONNXDetectionThread constructor, matching create_detection_thread.
        """
        if self.detect_button.text() == "启动检测":
            # --- start path ---
            try:
                if not self.onnx_model_path:
                    QMessageBox.warning(self, "模型未加载", "请先加载ONNX模型")
                    return

                # Build the detection thread; for network cameras forward the
                # address so the thread can actually open the remote stream
                if self.camera_type == "network":
                    ip = self.camera_ip_input.text()
                    port = int(self.camera_port_input.text())
                    self.detection_thread = ONNXDetectionThread(
                        self.onnx_model_path, ip=ip, port=port
                    )
                    self.detection_thread.set_camera_type("network")
                else:
                    self.detection_thread = ONNXDetectionThread(self.onnx_model_path)
                    self.detection_thread.set_camera_type("local")

                # Load the model before the thread starts
                if not self.detection_thread.load_model():
                    QMessageBox.critical(self, "错误", "无法加载ONNX模型")
                    return

                # Wire thread output into the UI handlers
                self.detection_thread.update_frame.connect(self.update_frame)
                self.detection_thread.detection_result.connect(self.update_detection_result)
                self.detection_thread.detection_coords.connect(self.handle_detection_coords)

                self.detection_thread.start()
                self.detect_button.setText("停止检测")
                self.detect_button.setStyleSheet("background-color: #FF4D4D;")
            except Exception as e:
                QMessageBox.critical(self, "错误", f"无法启动目标检测: {str(e)}")
        else:
            # --- stop path: tear the thread down and reset the UI ---
            if self.detection_thread:
                self.detection_thread.stop()
                self.detection_thread.wait(2000)  # wait for the thread to exit
                self.detection_thread = None
                self.detect_button.setText("启动检测")
                self.detect_button.setStyleSheet("")
                self.detection_label.clear()
                self.detection_label.setText("摄像头未启动")

    def handle_detection_coords(self, coords_list):
        """Receive detected object world coordinates from the detection thread."""
        for entry in coords_list:
            if not entry:
                continue
            world_x, world_y = entry
            # Currently informational only; hook robot actions in here if needed.
            # print(f"检测到物体位置: 世界坐标({world_x:.1f}, {world_y:.1f})")

    def load_teach_points(self):
        """Load teach points from TEACH_POINTS_FILE; fall back to an empty list."""
        try:
            if not os.path.exists(TEACH_POINTS_FILE):
                self.teach_points = []
                print("未找到示教点文件，已初始化空列表")
                return
            with open(TEACH_POINTS_FILE, 'r') as f:
                self.teach_points = json.load(f)
                print(f"成功加载 {len(self.teach_points)} 个示教点")
        except Exception as e:
            # Any parse/IO failure resets to an empty list
            print(f"加载示教点时出错: {str(e)}")
            self.teach_points = []

    def save_teach_points(self):
        """Persist self.teach_points to TEACH_POINTS_FILE as JSON.

        Returns True on success, False on any failure.
        """
        try:
            with open(TEACH_POINTS_FILE, 'w') as f:
                json.dump(self.teach_points, f, indent=2)
                print(f"成功保存 {len(self.teach_points)} 个示教点")
            return True
        except Exception as e:
            print(f"保存示教点时出错: {str(e)}")
            return False

    def save_teach_point_object(self, point):
        """Insert or replace a teach point (keyed by 'name'), then persist and refresh the UI."""
        # Replace the first point with the same name, otherwise append
        for idx, existing in enumerate(self.teach_points):
            if existing['name'] == point['name']:
                self.teach_points[idx] = point
                break
        else:
            self.teach_points.append(point)

        # Only refresh the list widget when the save actually succeeded
        if self.save_teach_points():
            self.update_teach_point_list()

    def update_teach_point_list(self):
        """Rebuild the teach-point list widget from self.teach_points."""
        self.teach_point_list.clear()
        for point in self.teach_points:
            entry = QListWidgetItem(point.get('name', '未命名点'))
            # Attach the full point dict so selection can recover it later
            entry.setData(Qt.UserRole, point)
            self.teach_point_list.addItem(entry)

    def move_to_xyz(self):
        """Move the robot TCP to the XYZ entered in the UI, plus user offsets.

        Keeps the current orientation (rx/ry/rz), optionally applies a
        hard-coded axis-4 drift correction after the move, and reports
        success/failure through message boxes.
        """
        if not self.connection or not self.connection.is_connected():
            QMessageBox.warning(self, "未连接", "未连接到机器人，无法移动")
            return

        try:
            # Read target coordinates (empty fields count as 0)
            x = float(self.target_x_input.text()) if self.target_x_input.text() else 0
            y = float(self.target_y_input.text()) if self.target_y_input.text() else 0
            z = float(self.target_z_input.text()) if self.target_z_input.text() else 0
            # Add the user offset fields on top of the target
            x += float(self.offset_x_input.text()) if self.offset_x_input.text() else 0
            y += float(self.offset_y_input.text()) if self.offset_y_input.text() else 0
            z += float(self.offset_z_input.text()) if self.offset_z_input.text() else 0

            # Current pose supplies the orientation component
            coords = self.connection.get_robot().get_coords()
            if len(coords) < 6:
                QMessageBox.warning(self, "错误", "无法获取机器人当前位置")
                return

            # Target = new XYZ + current rx/ry/rz
            target_coords = [x, y, z] + coords[3:]

            # Record axis-4 angle before the move so drift can be detected afterwards
            angles_before = self.connection.get_robot().get_angles()
            if angles_before and len(angles_before) >= 4:
                axis4_angle_before = angles_before[3]
                print(f"移动前记录的4轴角度: {axis4_angle_before}°")
            else:
                print("无法获取移动前角度")
                axis4_angle_before = None

            # Issue the move (mode=1: linear interpolation, fixed speed 30)
            self.connection.get_robot().send_coords(target_coords, 30, mode=1)

            # Block until the motion stops
            while self.connection.get_robot().is_moving():
                time.sleep(0.1)

            # Optional axis-4 drift correction
            if self.angle_correction_checkbox.isChecked():
                # Re-read joint angles after the move
                angles_after = self.connection.get_robot().get_angles()

                if angles_after and len(angles_after) >= 4 and axis4_angle_before is not None:
                    axis4_angle_after = angles_after[3]
                    print(f"移动后4轴实际角度: {axis4_angle_after}°")

                    # Correct only when the drift exceeds 0.7 degrees
                    angle_diff = abs(axis4_angle_before - axis4_angle_after)
                    if angle_diff > 0.7:
                        print(f"检测到4轴角度偏移 {angle_diff:.2f}°，进行修正...")

                        # NOTE(review): fixed +6.5° compensation on axis 4 —
                        # presumably an empirically tuned value; confirm on hardware
                        correction_angles = list(angles_after)
                        correction_angles[3] = correction_angles[3] + 6.5  # correct axis 4

                        # Apply the correction at a slower speed (20)
                        self.connection.get_robot().send_angles(correction_angles, 20)

                        # Re-read to report the residual error
                        time.sleep(0.5)
                        final_angles = self.connection.get_robot().get_angles()
                        if len(final_angles) >= 4:
                            print(f"修正后4轴角度: {final_angles[3]}°")
                            print(f"最终误差: {abs(axis4_angle_before - final_angles[3]):.2f}°")
                    else:
                        print("4轴角度变化在允许范围内，无需修正")

            QMessageBox.information(self, "移动完成", f"已移动到位置: X={x}, Y={y}, Z={z}")

        except ValueError:
            QMessageBox.warning(self, "输入错误", "请输入有效的数字坐标")
        except Exception as e:
            QMessageBox.critical(self, "移动错误", f"移动过程中出错: {str(e)}")

    def detect_image(self):
        """Pick an image file and run one-shot ONNX detection on it."""
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择图像文件", "",
            "图像文件 (*.png *.jpg *.jpeg *.bmp);;所有文件 (*)",
            options=QFileDialog.Options()
        )

        if not file_path:
            return

        # A throwaway detection thread handles just this one image
        image_detector = ONNXDetectionThread(self.onnx_model_path)
        image_detector.set_single_image(file_path)

        image_detector.single_image_result.connect(self.handle_single_image_result)
        # Self-destruct once the run finishes
        image_detector.finished.connect(lambda: image_detector.deleteLater())

        image_detector.start()

        # Show a busy state while the thread works
        self.detection_label.clear()
        self.detection_label.setText("正在检测图像...")
        self.detection_result_text.setText("处理中...")

    def handle_single_image_result(self, result_frame, result_text):
        """Show the annotated frame and text from a single-image detection run."""
        if result_frame is None:
            # The thread reports failures through result_text
            self.detection_label.setText("检测失败")
            self.detection_result_text.setText(result_text)
            QMessageBox.warning(self, "错误", result_text)
            return

        self.display_image(result_frame)
        self.detection_result_text.setText("检测结果: " + result_text)

    def display_image(self, frame):
        """Render a BGR OpenCV frame into the detection QLabel, scaled to fit."""
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        height, width, channels = rgb.shape
        image = QImage(rgb.data, width, height, channels * width, QImage.Format_RGB888)

        # Fit the label while preserving aspect ratio
        scaled = QPixmap.fromImage(image).scaled(
            self.detection_label.width(),
            self.detection_label.height(),
            Qt.KeepAspectRatio,
            Qt.SmoothTransformation,
        )
        self.detection_label.setPixmap(scaled)

    def calibrate_single_image(self):
        """Run camera calibration from one user-chosen image containing ArUco markers."""
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择标定图像", "",
            "图像文件 (*.png *.jpg *.jpeg *.bmp);;所有文件 (*)",
            options=QFileDialog.Options()
        )

        if not file_path:
            return

        # The calibration routine lives on the detection thread
        if not self.detection_thread:
            self.create_detection_thread()

        if self.detection_thread:
            if self.detection_thread.calibrate_single_image(file_path):
                QMessageBox.information(self, "标定成功", "使用单张图像标定成功！")
            else:
                QMessageBox.warning(self, "标定失败", "无法完成标定，请确保图像中包含两个ArUco标记")

    def create_detection_thread(self):
        """Build (but do not start) an ONNX detection thread from the UI camera settings."""
        try:
            # Camera source from the combo box; address fields always read
            source = "network" if self.camera_type_combo.currentIndex() == 1 else "local"
            ip = self.camera_ip_input.text()
            port = int(self.camera_port_input.text())

            self.detection_thread = ONNXDetectionThread(
                self.onnx_model_path,
                ip=ip,
                port=port,
            )
            self.detection_thread.camera_type = source

            # Route thread output into the UI handlers
            self.detection_thread.update_frame.connect(self.update_frame)
            self.detection_thread.detection_result.connect(self.update_detection_result)
            self.detection_thread.detection_coords.connect(self.handle_detection_coords)
        except Exception as e:
            QMessageBox.critical(self, "错误", f"创建检测线程失败: {str(e)}")
            self.detection_thread = None

    def start_calibration(self):
        """Ensure the detection thread is running, then trigger camera calibration."""
        if not self.detection_thread or not self.detection_thread.isRunning():
            # Spin the detection pipeline up on demand
            self.create_detection_thread()
            if self.detection_thread:
                self.detection_thread.start()
                self.detect_button.setText("停止检测")
                self.detect_button.setStyleSheet("background-color: #FF4D4D;")

        # Only calibrate against a live, running thread
        if self.detection_thread and self.detection_thread.isRunning():
            try:
                self.detection_thread.perform_calibration()
            except Exception as e:
                QMessageBox.critical(self, "标定错误", f"标定过程中发生错误: {str(e)}")
        else:
            QMessageBox.warning(self, "错误", "无法启动标定，请先确保摄像头检测已正常启动")

    def apply_offsets(self):
        """Read user offsets from the input fields, validate, then store them.

        Fix: validation now happens before self.user_offset_* are assigned,
        so an out-of-range entry can no longer leave oversized offsets
        committed while the UI reports rejection (the original assigned
        first and rejected afterwards).
        """
        try:
            offset_x = float(self.offset_x_input.text() or 0)
            offset_y = float(self.offset_y_input.text() or 0)
            offset_z = float(self.offset_z_input.text() or 0)

            # Reject before committing anything
            if abs(offset_x) > 200 or abs(offset_y) > 200 or abs(offset_z) > 300:
                QMessageBox.warning(self, "偏移过大", "偏移量不能超过±200mm,z不能超过300mm")
                return

            self.user_offset_x = offset_x
            self.user_offset_y = offset_y
            self.user_offset_z = offset_z

            QMessageBox.information(self, "偏移应用",
                                    f"偏移量已设置:\nX: {self.user_offset_x}mm\nY: {self.user_offset_y}mm\nZ: {self.user_offset_z}mm")
        except ValueError:
            QMessageBox.warning(self, "输入错误", "请输入有效的数字偏移量")

    def start_audio_server(self):
        """Launch the microphone server thread unless one is already running."""
        # Idempotent: a live thread is left alone
        if self.audio_thread and self.audio_thread.isRunning():
            return

        try:
            self.audio_thread = AudioServerThread()
            self.audio_thread.status_changed.connect(self.update_audio_status)
            self.audio_thread.audio_data_ready.connect(self.process_audio_data)
            self.audio_thread.start()
        except Exception as e:
            # A microphone failure must not take down the robot link
            QMessageBox.warning(self, "麦克风错误",
                                f"无法启动麦克风服务器: {str(e)}\n机器人连接不受影响")
            self.audio_status_label.setText("麦克风: 启动失败")

    def stop_audio_server(self):
        """Stop the microphone server thread, waiting briefly for shutdown."""
        if not self.audio_thread:
            return
        self.audio_thread.stop()
        self.audio_thread.wait(1000)  # give the thread up to 1 s to exit
        self.audio_thread = None
        self.audio_status_label.setText("麦克风: 已停止")

    def update_audio_status(self, message):
        """Mirror microphone status into the status label and the console log."""
        status_text = f"麦克风: {message}"
        self.audio_status_label.setText(status_text)
        print(f"[AUDIO] {message}")

    def process_audio_data(self, audio_data):
        """Feed captured audio into the (placeholder) speech-recognition path."""
        if not self.speech_recognition_active:
            return
        print(f"接收到 {len(audio_data)} 字节音频数据，可用于语音识别")
        # Placeholder: trigger recognition on a simple volume gate
        if self.calculate_volume(audio_data) > 0.50:
            print("检测到语音活动，准备识别...")
            self.start_speech_recognition()

    def calculate_volume(self, audio_data):
        """Return the RMS amplitude of raw 16-bit PCM audio bytes.

        Fixes: the original squared the int16 array in-place, which
        overflows for any sample above 181 (181**2 > int16 max), and an
        empty buffer produced a NaN mean. Samples are now widened to
        float64 before squaring and empty input returns 0.0.
        """
        audio_array = np.frombuffer(audio_data, dtype=np.int16)
        if audio_array.size == 0:
            return 0.0
        samples = audio_array.astype(np.float64)
        return float(np.sqrt(np.mean(samples * samples)))

    def toggle_speech_recognition(self):
        """Flip speech recognition on/off; requires the microphone thread."""
        if not self.audio_thread or not self.audio_thread.isRunning():
            QMessageBox.warning(self, "麦克风未连接", "请先连接麦克风")
            return

        # Toggle state and keep the button label in sync
        if self.speech_recognition_active:
            self.stop_speech_recognition()
            self.speech_recognition_btn.setText("启动语音识别")
        else:
            self.start_speech_recognition()
            self.speech_recognition_btn.setText("停止语音识别")

    def start_speech_recognition(self):
        """Arm the speech-recognition flag and reflect the state in the UI."""
        self.speech_recognition_active = True
        self.status_label.setText("语音识别已启动...")
        print("语音识别启动，等待语音命令...")

    def stop_speech_recognition(self):
        """Disarm the speech-recognition flag and reflect the state in the UI."""
        self.speech_recognition_active = False
        self.status_label.setText("语音识别已停止")
        print("语音识别停止")

    def toggle_camera(self):
        """Start the camera preview thread if it is not already running."""
        existing = getattr(self, 'camera_thread', None)
        if existing and existing.isRunning():
            QMessageBox.information(self, "摄像头状态", "摄像头已在运行中")
            return

        # Camera source and address from the UI
        source = "network" if self.camera_type_combo.currentIndex() == 1 else "local"
        ip = self.camera_ip_input.text()
        port = int(self.camera_port_input.text())

        self.camera_thread = CameraThread(source, ip, port)
        self.camera_thread.update_frame.connect(self.update_frame)
        self.camera_thread.status_changed.connect(self.update_camera_status)
        self.camera_thread.start()

        self.open_camera_btn.setText("摄像头运行中")
        self.open_camera_btn.setEnabled(False)
        self.close_camera_btn.setEnabled(True)

    def close_camera(self):
        """Stop the camera thread and reset the preview area and buttons."""
        existing = getattr(self, 'camera_thread', None)
        if not existing or not existing.isRunning():
            return

        existing.stop()
        self.camera_thread = None

        # Blank out the preview
        self.detection_label.clear()
        self.detection_label.setText("摄像头已关闭")

        self.open_camera_btn.setText("打开摄像头")
        self.open_camera_btn.setEnabled(True)
        self.close_camera_btn.setEnabled(False)
        self.update_camera_status("摄像头: 关闭")

    def update_camera_status(self, message):
        """Show camera status in its label, ensuring the "摄像头: " prefix.

        Fix: the original wrote the label twice (raw message, then the
        prefixed version), causing a redundant intermediate update; the
        final text is now computed once and set with a single call.
        """
        if "摄像头: " not in message:
            message = f"摄像头: {message}"
        self.camera_status_label.setText(message)

if __name__ == "__main__":
    # Application entry point: build the Qt app and the main control window.
    app = QApplication(sys.argv)
    window = RobotControlUI()
    # Cap the window to the usable screen area (excludes taskbars/docks).
    screen_rect = app.primaryScreen().availableGeometry()
    window.setMaximumSize(screen_rect.width(), screen_rect.height())
    window.show()
    sys.exit(app.exec_())
