import os
import pyrealsense2 as rs
import threading
from PyQt6 import QtCore, QtGui
from PyQt6.QtCore import Qt, QTimer
from PyQt6.QtWidgets import (
    QDialog, QLabel, QComboBox, QPushButton, QTextEdit,
    QVBoxLayout, QHBoxLayout, QGroupBox
)
import cv2
import numpy as np
import open3d as o3d

class RealSenseThread:
    """Threaded wrapper around an Intel RealSense camera.

    Starts a background thread that continuously captures color frames and
    depth frames aligned to the color stream. The most recent frame pair is
    cached under a lock and handed to the main thread via get_images().
    """

    def __init__(self):
        self.pipeline = rs.pipeline()
        self.config = rs.config()

        # Supported resolution presets: key -> (width, height, fps).
        self.resolution_presets = {
            "640x480": (640, 480, 30),
            "848x480": (848, 480, 30),
            "1280x720": (1280, 720, 30)
        }
        self.current_resolution = "640x480"

        # Initialize shared state BEFORE the device check so that
        # is_connected(), get_images() and stop() are safe to call even when
        # no device is found and __init__ returns early. (Previously these
        # attributes were only created after the check, so the no-device path
        # left the object half-initialized and later calls raised
        # AttributeError.)
        self.color_image = None
        self.depth_aligned_to_color = None
        self.lock = threading.Lock()
        self.running = False
        self.intrinsics = None

        # Probe for a connected RealSense device.
        self.context = rs.context()
        if len(self.context.devices) == 0:
            print("⚠️ 未检测到 RealSense 设备！")
            self.device_connected = False
            return
        print("✅ RealSense 设备已连接")
        self.device_connected = True
        self.running = True

        # Enable streams and start the pipeline.
        self._configure_camera()

        # Color-stream intrinsics; only valid after _configure_camera()
        # has successfully started the pipeline.
        profile = self.pipeline.get_active_profile()
        color_profile = profile.get_stream(rs.stream.color)
        intr = color_profile.as_video_stream_profile().get_intrinsics()
        self.intrinsics = (intr.fx, intr.fy, intr.ppx, intr.ppy)

        # Start the frame-capture thread. daemon=True so a crashed main
        # thread cannot be kept alive by a blocked wait_for_frames().
        self.thread = threading.Thread(target=self._capture_frames, daemon=True)
        self.thread.start()

    def _configure_camera(self):
        """(Re)configure and start the pipeline for the current resolution.

        Returns:
            bool: True on success, False if the pipeline failed to start.
        """
        try:
            # Stop any previously running pipeline; ignore "not started".
            try:
                self.pipeline.stop()
            except Exception:
                pass

            # Rebuild the stream configuration from the selected preset.
            self.config = rs.config()
            w, h, fps = self.resolution_presets[self.current_resolution]
            self.config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, fps)
            self.config.enable_stream(rs.stream.depth, w, h, rs.format.z16, fps)

            # Start streaming.
            self.pipeline.start(self.config)

            # Depth-to-color alignment and point-cloud helpers.
            self.align_to_color = rs.align(rs.stream.color)
            self.pc = rs.pointcloud()

            print(f"✅ 相机配置更新成功：{w}x{h}@{fps}fps")
            return True
        except Exception as e:
            print(f"❌ 相机配置失败：{str(e)}")
            return False

    def _capture_frames(self):
        """Capture-thread loop: fetch, align and cache the latest frames.

        NOTE(review): change_resolution() restarts the pipeline from the main
        thread while this loop may be blocked in wait_for_frames(); that can
        surface here as an exception and mark the device disconnected —
        confirm whether reconfiguration should pause this thread first.
        """
        while self.running:
            try:
                frames = self.pipeline.wait_for_frames(timeout_ms=5000)
                aligned_frames = self.align_to_color.process(frames)
                color_frame = aligned_frames.get_color_frame()
                depth_frame = aligned_frames.get_depth_frame()

                if not color_frame or not depth_frame:
                    continue

                color_image = np.asanyarray(color_frame.get_data())
                depth_image = np.asanyarray(depth_frame.get_data())

                # Publish the newest frame pair atomically.
                with self.lock:
                    self.color_image = color_image
                    self.depth_aligned_to_color = depth_image

            except Exception as e:
                print(f"[子线程异常] {e}")
                with self.lock:
                    self.device_connected = False
                break

    def get_intrinsics(self):
        """Return the aligned color stream intrinsics.

        Returns:
            tuple | None: (fx, fy, cx, cy) in pixels, or None when the
            pipeline was never started (e.g. no device connected).
        """
        return self.intrinsics

    def change_resolution(self, resolution_key):
        """Switch the camera to one of the presets in resolution_presets.

        Args:
            resolution_key: preset key such as "640x480".
        Returns:
            bool: True if the pipeline was reconfigured successfully.
        """
        if resolution_key in self.resolution_presets:
            self.current_resolution = resolution_key
            return self._configure_camera()
        return False

    def is_connected(self):
        """Main-thread accessor for the current connection state."""
        with self.lock:
            return self.device_connected

    def get_images(self):
        """Return copies of the latest (color, aligned-depth) frame pair.

        Returns:
            tuple: (color BGR ndarray | None, depth uint16 ndarray | None).
        """
        with self.lock:
            color = self.color_image.copy() if self.color_image is not None else None
            depth = self.depth_aligned_to_color.copy() if self.depth_aligned_to_color is not None else None
        return color, depth

    def save_images(self, color_filename="color_image.png", depth_filename="depth_image.png"):
        """Save the currently cached aligned color/depth frames to disk."""
        color_frame, depth_frame = self.get_images()
        if color_frame is not None:
            cv2.imwrite(color_filename, color_frame)
            print(f"保存彩色图像至 {color_filename}")
        if depth_frame is not None:
            cv2.imwrite(depth_filename, depth_frame)
            print(f"保存深度图像至 {depth_filename}")

    def get_pointcloud(self):
        """Capture one frame pair and compute a textured point cloud.

        Returns:
            dict | None: {'points': Nx3 float32 xyz, 'texture_coords': Nx2 uv,
            'color_image': BGR ndarray}, or None if a frame was missing.
        """
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align_to_color.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not depth_frame or not color_frame:
            return None

        # Compute the point cloud and map texture coordinates onto it.
        points = self.pc.calculate(depth_frame)
        self.pc.map_to(color_frame)

        # Vertex positions (xyz) and texture coordinates (uv).
        vtx = np.asanyarray(points.get_vertices(2)).view(np.float32).reshape(-1, 3)
        tex = np.asanyarray(points.get_texture_coordinates(2)).reshape(-1, 2)

        return {
            'points': vtx,
            'texture_coords': tex,
            'color_image': np.asanyarray(color_frame.get_data())
        }

    def save_pointcloud_to_ply(self, filename="pointcloud.ply"):
        """Capture a point cloud and write it as an ASCII PLY file with color.

        Args:
            filename: output PLY path.
        """
        pc_data = self.get_pointcloud()
        if pc_data is None:
            # get_pointcloud() returns None when a frame was unavailable.
            print("⚠️ 未获取到有效点云，已取消保存")
            return
        points = pc_data['points']
        texture_coords = pc_data['texture_coords']
        color_image = pc_data['color_image']

        height, width, _ = color_image.shape

        # Map normalized texture coordinates to pixel coordinates.
        uvs = np.floor(texture_coords * (width, height)).astype(np.int32)
        uvs[:, 0] = np.clip(uvs[:, 0], 0, width - 1)
        uvs[:, 1] = np.clip(uvs[:, 1], 0, height - 1)

        # Look up each point's color and convert BGR -> RGB.
        colors = color_image[uvs[:, 1], uvs[:, 0]]
        colors = colors[:, ::-1]

        # Drop invalid points (all-zero xyz means no depth).
        mask = ~np.all(np.isclose(points, 0), axis=1)
        points = points[mask]
        colors = colors[mask]

        # Write the ASCII PLY file.
        with open(filename, 'w') as f:
            f.write("ply\n")
            f.write("format ascii 1.0\n")
            f.write(f"element vertex {len(points)}\n")
            f.write("property float x\n")
            f.write("property float y\n")
            f.write("property float z\n")
            f.write("property uchar red\n")
            f.write("property uchar green\n")
            f.write("property uchar blue\n")
            f.write("end_header\n")

            for i in range(len(points)):
                x, y, z = points[i]
                r, g, b = colors[i]
                f.write(f"{x} {y} {z} {r} {g} {b}\n")

        # Fixed: the original f-string printed the literal "(unknown)"
        # instead of the actual output path.
        print(f"点云已保存至 {filename}")

    def stop(self):
        """Stop the capture thread and the pipeline."""
        self.running = False
        if hasattr(self, 'thread'):
            self.thread.join()
        try:
            self.pipeline.stop()
        except Exception:
            pass

class PointCloudThread(QtCore.QThread):
    """Qt thread that opens an Open3D viewer window for a point cloud.

    The `finished` signal is emitted once the viewer closes, whether the
    display succeeded or raised an error.
    """

    finished = QtCore.pyqtSignal()

    def __init__(self, pcd, parent=None):
        super().__init__(parent)
        # Point cloud to display (o3d.geometry.PointCloud).
        self.pcd = pcd

    def run(self):
        """Block on the Open3D viewer, then notify listeners."""
        try:
            o3d.visualization.draw_geometries([self.pcd])
        except Exception as exc:
            print(f"显示点云时出错: {str(exc)}")
        finally:
            self.finished.emit()

class CameraInfoDialog(QDialog):
    """Dialog showing live color/depth previews and camera intrinsics.

    Polls the RealSenseThread instance on a 33 ms QTimer and offers
    resolution switching, a toggleable depth view, and a one-shot
    Open3D point-cloud viewer.
    """

    def __init__(self, camera, main_window, parent=None):
        super().__init__(parent)
        self.camera = camera  # RealSenseThread instance
        self.main_window = main_window  # kept to pause/resume its own timer
        self.setup_ui()
        self.setup_connections()

    def setup_ui(self):
        """Build all widgets and the refresh timer."""
        self.setWindowTitle("相机信息")
        self.resize(1300, 800)

        # Main vertical layout.
        main_layout = QVBoxLayout()

        # Top control row.
        control_layout = QHBoxLayout()

        # Resolution selector.
        resolution_label = QLabel("分辨率:")
        resolution_label.setStyleSheet("QLabel { font: bold 10pt; }")
        self.resolution_combo = QComboBox()
        self.resolution_combo.addItems(self.camera.resolution_presets.keys())
        self.resolution_combo.setCurrentText(self.camera.current_resolution)
        control_layout.addWidget(resolution_label)
        control_layout.addWidget(self.resolution_combo)
        control_layout.addStretch()  # flexible spacer

        # Control buttons.
        self.depth_show = QPushButton("显示深度图")
        self.depth_show.setCheckable(True)
        self.depth_show.setChecked(False)
        self.pc_show = QPushButton("显示点云")
        control_layout.addWidget(self.depth_show)
        control_layout.addWidget(self.pc_show)
        main_layout.addLayout(control_layout)

        # Upper image area (side by side).
        images_layout = QHBoxLayout()

        # Color image panel.
        color_group = QGroupBox("彩色图像")
        color_group_layout = QVBoxLayout()
        self.color_img = QLabel()
        self.color_img.setFixedSize(640, 480)
        self.color_img.setAlignment(Qt.AlignmentFlag.AlignCenter)
        color_group_layout.addWidget(self.color_img)
        color_group.setLayout(color_group_layout)
        images_layout.addWidget(color_group)

        # Depth image panel (hidden until toggled on).
        depth_group = QGroupBox("深度图像")
        depth_group.setVisible(False)
        self.depth_group = depth_group
        depth_group_layout = QVBoxLayout()
        self.depth_img = QLabel()
        self.depth_img.setFixedSize(640, 480)
        self.depth_img.setAlignment(Qt.AlignmentFlag.AlignCenter)
        depth_group_layout.addWidget(self.depth_img)
        depth_group.setLayout(depth_group_layout)
        images_layout.addWidget(depth_group)

        main_layout.addLayout(images_layout)

        # Lower info area (side by side).
        info_layout = QHBoxLayout()

        # Color camera info.
        color_info_group = QGroupBox("彩色相机信息")
        color_info_layout = QVBoxLayout()
        self.color_info = QTextEdit()
        self.color_info.setMaximumHeight(150)
        self.color_info.setReadOnly(True)
        color_info_layout.addWidget(self.color_info)
        color_info_group.setLayout(color_info_layout)
        info_layout.addWidget(color_info_group)

        # Depth camera info (hidden until toggled on).
        depth_info_group = QGroupBox("深度相机信息")
        depth_info_group.setVisible(False)
        self.depth_info_group = depth_info_group
        depth_info_layout = QVBoxLayout()
        self.depth_info = QTextEdit()
        self.depth_info.setMaximumHeight(150)
        self.depth_info.setReadOnly(True)
        depth_info_layout.addWidget(self.depth_info)
        depth_info_group.setLayout(depth_info_layout)
        info_layout.addWidget(depth_info_group)

        main_layout.addLayout(info_layout)

        self.setLayout(main_layout)

        # ~30 fps UI refresh; started in showEvent, stopped in closeEvent.
        self.update_timer = QTimer(self)
        self.update_timer.setInterval(33)

    def setup_connections(self):
        """Wire signals to slots."""
        self.update_timer.timeout.connect(self.update_camera_info)
        self.depth_show.toggled.connect(self.toggle_depth_display)
        self.pc_show.clicked.connect(self.show_pointcloud)
        self.resolution_combo.currentTextChanged.connect(self.change_resolution)

    def showEvent(self, event):
        # Pause the main window's camera timer so only one consumer polls.
        if hasattr(self.main_window, 'camera_update_timer'):
            self.main_window.camera_update_timer.stop()
        self.update_timer.start()
        super().showEvent(event)

    def closeEvent(self, event):
        # Hand the camera polling back to the main window.
        self.update_timer.stop()
        if hasattr(self.main_window, 'camera_update_timer'):
            self.main_window.camera_update_timer.start()
        super().closeEvent(event)

    def update_camera_info(self):
        """Timer slot: refresh previews and intrinsics text panes."""
        color_frame, depth_frame = self.camera.get_images()
        if color_frame is not None:
            # Update the color preview.
            rgb_image = cv2.cvtColor(color_frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            qt_image = QtGui.QImage(rgb_image.data, w, h, ch * w, QtGui.QImage.Format.Format_RGB888)
            self.color_img.setPixmap(QtGui.QPixmap.fromImage(qt_image).scaled(
                self.color_img.size(), Qt.AspectRatioMode.KeepAspectRatio, Qt.TransformationMode.SmoothTransformation))

            # Update the color-camera info pane.
            profile = self.camera.pipeline.get_active_profile()
            color_stream = profile.get_stream(rs.stream.color)
            color_intrinsics = color_stream.as_video_stream_profile().get_intrinsics()
            color_info = (
                f"图像尺寸信息：\n"
                f"宽度: {w} 像素\n"
                f"高度: {h} 像素\n"
                f"通道数: {ch}\n"
                f"\n相机内参：\n"
                f"焦距: fx={color_intrinsics.fx:.2f}, fy={color_intrinsics.fy:.2f}\n"
                f"主点: cx={color_intrinsics.ppx:.2f}, cy={color_intrinsics.ppy:.2f}\n"
                f"畸变系数: {[round(x, 4) for x in color_intrinsics.coeffs]}\n"
                f"\n其他信息：\n"
                f"帧率: {color_stream.fps()}fps\n"
                f"格式: {color_stream.format()}"
            )
            self.color_info.setText(color_info)

        if depth_frame is not None and self.depth_show.isChecked():
            # Colorize the 16-bit depth for display.
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_frame, alpha=0.03),
                cv2.COLORMAP_JET
            )
            depth_colormap = cv2.cvtColor(depth_colormap, cv2.COLOR_BGR2RGB)
            h, w = depth_frame.shape

            # Update the depth preview.
            qt_image = QtGui.QImage(depth_colormap.data, w, h, 3 * w, QtGui.QImage.Format.Format_RGB888)
            self.depth_img.setPixmap(QtGui.QPixmap.fromImage(qt_image).scaled(
                self.depth_img.size(), Qt.AspectRatioMode.KeepAspectRatio, Qt.TransformationMode.SmoothTransformation))

            # Update the depth-camera info pane.
            depth_stream = self.camera.pipeline.get_active_profile().get_stream(rs.stream.depth)
            depth_intrinsics = depth_stream.as_video_stream_profile().get_intrinsics()
            depth_scale = self.camera.pipeline.get_active_profile().get_device().first_depth_sensor().get_depth_scale()
            depth_info = (
                f"图像尺寸信息：\n"
                f"宽度: {w} 像素\n"
                f"高度: {h} 像素\n"
                f"\n相机内参：\n"
                f"焦距: fx={depth_intrinsics.fx:.2f}, fy={depth_intrinsics.fy:.2f}\n"
                f"主点: cx={depth_intrinsics.ppx:.2f}, cy={depth_intrinsics.ppy:.2f}\n"
                f"畸变系数: {[round(x, 4) for x in depth_intrinsics.coeffs]}\n"
                f"\n其他信息：\n"
                f"帧率: {depth_stream.fps()}fps\n"
                f"格式: {depth_stream.format()}\n"
                f"深度比例: {depth_scale:.6f} 米/单位"
            )
            self.depth_info.setText(depth_info)

    def toggle_depth_display(self, checked):
        """Show/hide the depth preview and depth info panes together."""
        self.depth_group.setVisible(checked)
        self.depth_info_group.setVisible(checked)

    def show_pointcloud(self):
        """Capture a point cloud and display it in a separate thread."""
        try:
            # Grab the current frame's point cloud.
            pc_data = self.camera.get_pointcloud()
            if pc_data is not None:
                pcd = o3d.geometry.PointCloud()
                # Filter invalid points (all-zero xyz means no depth).
                points = pc_data['points']
                valid_points = ~np.all(points == 0, axis=1)
                points = points[valid_points]
                pcd.points = o3d.utility.Vector3dVector(points)

                # Convert the color image to RGB for Open3D.
                color_image = cv2.cvtColor(pc_data['color_image'], cv2.COLOR_BGR2RGB)
                h, w = color_image.shape[:2]

                # Texture coordinates for the surviving points.
                tex_coords = pc_data['texture_coords'][valid_points]

                # Vectorized color lookup (replaces a per-point Python loop):
                # truncate uv*size to pixel indices, clamp into bounds, then
                # fancy-index the image and normalize to [0, 1].
                u = np.clip((tex_coords[:, 0] * w).astype(np.int32), 0, w - 1)
                v = np.clip((tex_coords[:, 1] * h).astype(np.int32), 0, h - 1)
                colors = color_image[v, u] / 255.0
                pcd.colors = o3d.utility.Vector3dVector(colors)

                # Display the cloud on its own thread so the UI stays live.
                self.pc_thread = PointCloudThread(pcd)
                self.pc_thread.finished.connect(self.on_pointcloud_closed)
                self.pc_thread.start()

                # Disable the button until the viewer is closed.
                self.pc_show.setEnabled(False)
        except Exception as e:
            print(f"准备点云数据时出错: {str(e)}")

    def on_pointcloud_closed(self):
        """Re-enable the point-cloud button once the viewer closes."""
        self.pc_show.setEnabled(True)

    def change_resolution(self, resolution):
        """Apply a resolution change; revert the combo box on failure."""
        if self.camera.change_resolution(resolution):
            print(f"分辨率已更改为 {resolution}")
        else:
            print("分辨率更改失败")
            # Restore the previously working selection.
            self.resolution_combo.setCurrentText(self.camera.current_resolution)

# ========================
# 主程序入口
# ========================

if __name__ == "__main__":
    # Standalone preview loop: show aligned color/depth, save on key press.
    camera = RealSenseThread()
    temp_path = "Dobot/temp"
    if not os.path.exists(temp_path):
        os.makedirs(temp_path)
        print("创建临时文件夹成功！")

    try:
        while True:
            color_frame, depth_frame = camera.get_images()

            if color_frame is not None:
                cv2.imshow('Aligned Color', color_frame)

            if depth_frame is not None:
                depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_frame, alpha=0.03), cv2.COLORMAP_JET)
                cv2.imshow('Aligned Depth (to Color)', depth_colormap)

            key = cv2.waitKey(1)
            if key == ord('q'):
                # Quit.
                break
            elif key == ord('p'):
                print("正在获取点云数据...")
                # save_pointcloud_to_ply() fetches its own point cloud; the
                # previous extra get_pointcloud() call here queried the camera
                # twice and discarded the first result.
                path = os.path.join(temp_path, "pointcloud.ply")
                camera.save_pointcloud_to_ply(filename=path)
            elif key == ord('s'):
                print("保存相机图像...")
                # Guard against frames that have not arrived yet: cv2.imwrite
                # raises on a None image.
                if color_frame is not None:
                    color_path = os.path.join(temp_path, "color_image.png")
                    cv2.imwrite(color_path, color_frame)
                if depth_frame is not None:
                    depth_path = os.path.join(temp_path, "depth_image.png")
                    cv2.imwrite(depth_path, depth_frame)
                print("图像保存成功")
    finally:
        # Always release the camera and close windows, even on Ctrl-C.
        camera.stop()
        cv2.destroyAllWindows()