import pyrealsense2 as rs
import numpy as np
import cv2
import os
import time
import multiprocessing as mp
import math
import argparse
import json
from utils import point_cloud_from_depth_c2w
from vedo import Points, Plotter

def stitch_images(images, max_cols=4, padding=0, pad_value=0):
    """Arrange a list of equally-sized images into a single grid image.

    Args:
        images: list[np.ndarray], each shaped (H, W) or (H, W, C).
        max_cols: maximum number of images per grid row (default 4).
        padding: gap in pixels between neighbouring tiles.
        pad_value: fill value (0-255) used for the gap/empty areas.

    Returns:
        np.ndarray: the stitched uint8 grid image, always 3-channel.

    Raises:
        ValueError: if the list is empty or image sizes are inconsistent.
    """
    if not images:
        raise ValueError("images 列表为空")

    # All tiles must share the first image's spatial size.
    h, w = images[0].shape[:2]
    for i, img in enumerate(images):
        if img.shape[:2] != (h, w):
            raise ValueError(f"第 {i} 张图片尺寸 {img.shape[:2]} 与第一张 {(h, w)} 不一致")

    def _ensure_rgb(img):
        # Promote grayscale / single-channel inputs to three channels.
        if img.ndim == 2:
            return np.repeat(img[:, :, None], 3, axis=2)
        if img.ndim == 3 and img.shape[2] == 1:
            return np.repeat(img, 3, axis=2)
        return img

    tiles = [_ensure_rgb(im).astype(np.uint8) for im in images]

    count = len(tiles)
    n_cols = count if count < max_cols else max_cols
    n_rows = -(-count // n_cols)  # ceiling division

    grid_h = n_rows * h + (n_rows - 1) * padding
    grid_w = n_cols * w + (n_cols - 1) * padding

    # Start from a canvas filled with the pad value; tiles overwrite it.
    canvas = np.full((grid_h, grid_w, 3), pad_value, dtype=np.uint8)

    for idx, tile in enumerate(tiles):
        row, col = divmod(idx, n_cols)
        top = row * (h + padding)
        left = col * (w + padding)
        canvas[top:top + h, left:left + w] = tile

    return canvas

def worker(shared_value, lock, pipeline, idx):
    """Decrement the shared counter once under `lock`, then pause briefly.

    Args:
        shared_value: multiprocessing.Value holding the shared counter.
        lock: multiprocessing.Lock guarding the read-modify-write.
        pipeline: unused here; kept for call-site compatibility.
        idx: unused here; kept for call-site compatibility.
    """
    lock.acquire()  # serialize access so the decrement is race-free
    try:
        print(f"进程 {os.getpid()} 运行, 当前值 = {shared_value.value}")
        shared_value.value -= 1
        print(f"进程 {os.getpid()} 修改后, 当前值 = {shared_value.value}")
    finally:
        lock.release()
    time.sleep(0.5)


# Command-line interface: calibration file path and capture resolution.
parser = argparse.ArgumentParser(description="输入相关的参数")
parser.add_argument('--camera','-c', type=str, default='calibration\\2025-10-24\\camera.json')
parser.add_argument(
    '--resolution',
    '-r', 
    type=int, 
    nargs=2, 
    metavar=('WIDTH', 'HEIGHT'),
    default=[640, 360],
    help='设置图像分辨率，例如 --resolution 1920 1080（默认：640x480）'
)

args = parser.parse_args()
resolution = tuple(args.resolution)
# Load the per-camera calibration entries (intrinsics, distortion, extrinsics).
with open(args.camera, 'r') as f:
    camera_list = json.load(f)

# Create the vedo plotter; the window stays open for live updates.
plt = Plotter(title="Vedo Real-time Point Cloud Viewer", bg="black")

# Seed the viewer with a placeholder random point cloud so the scene
# has an actor to replace on the first real frame.
points = np.random.rand(2000, 3)
cloud = Points(points, c='green', r=4)
plt.add_global_axes(axtype=4)
# Show without blocking so the capture loop below can run.
plt.show(cloud, interactive=False)  # non-blocking window

# Enumerate all connected RealSense devices.
context = rs.context()
devices = context.query_devices()

# Report how many devices were found.
print(f"Found {len(devices)} devices")

# One entry per camera: (pipeline, clipping_distance, align, K, D, c2w).
pipelines = []

# Configure and start a pipeline for every connected camera.
for i, device in enumerate(devices):
    # The serial number is the lookup key into the calibration file.
    name = device.get_info(rs.camera_info.serial_number)

    # NOTE(review): these stay None when the serial number has no entry in
    # camera_list; the main loop passes them straight into
    # point_cloud_from_depth_c2w — verify every camera is calibrated.
    K, D, c2w = None, None, None
    for camera in camera_list:
        if name == camera['name']:
            K = camera['intrinsic']    # presumably a 3x3 intrinsic matrix — confirm against camera.json
            D = camera['distortion']   # distortion coefficients
            K = np.array(K, dtype=np.float32)
            D = np.array(D, dtype=np.float32)
            rot_matrix = camera["rot_matrix"]
            trans_matrix = camera["trans_matrix"]
            # Build the 4x4 homogeneous world-to-camera (w2c) transform.
            rot = np.array(rot_matrix)
            trans = np.array(trans_matrix).reshape(3, 1)
            w2c = np.eye(4)
            w2c[:3, :3] = rot
            w2c[:3, 3] = trans.flatten()
            # Invert to obtain camera-to-world, used to fuse point clouds.
            c2w = np.linalg.inv(w2c)
            break

    print(f"Opening device {i}: {device.get_info(rs.camera_info.name)}")
    
    # Create a dedicated pipeline for this device.
    pipeline = rs.pipeline()
    
    # Configure the color and depth streams for this specific camera.
    config = rs.config()
    config.enable_device(device.get_info(rs.camera_info.serial_number))  # select the camera by its serial number
    config.enable_stream(rs.stream.color, resolution[0], resolution[1], rs.format.bgr8, 30)  # color stream at the requested resolution
    config.enable_stream(rs.stream.depth, resolution[0], resolution[1], rs.format.z16, 30)
    # Start streaming.
    profile = pipeline.start(config)
    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: " , depth_scale)

    # We will be removing the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 1.5 #1 meter
    clipping_distance = clipping_distance_in_meters / depth_scale
    # Align depth frames to the color stream's viewpoint.
    align_to = rs.stream.color
    align = rs.align(align_to)
    # Stash everything the main loop needs for this camera.
    pipelines.append((pipeline, clipping_distance, align, K, D, c2w))

frame_count = 0
# Acquisition loop: grab frames from every camera, stitch the 2D previews,
# and refresh the fused point cloud in the vedo window until Esc/Ctrl-C.
try:
    while True:
        images = []      # per-camera "color | depth-colormap" preview tiles
        pc_list = []     # per-camera world-space point clouds
        color_list = []  # per-point colors matching pc_list
        for i, (pipeline, clipping_distance, align, K, D, c2w) in enumerate(pipelines):
            # Block until a coherent frameset arrives, then align depth to color.
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = frames.get_color_frame()

            if color_frame and aligned_depth_frame:
                # Convert frames to NumPy arrays.
                color_image = np.asanyarray(color_frame.get_data())
                depth_image = np.asanyarray(aligned_depth_frame.get_data())
                # Cameras appear to be mounted sideways: rotate both streams upright.
                color_image = cv2.rotate(color_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
                depth_image = cv2.rotate(depth_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
                depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
                image = np.hstack((color_image, depth_colormap))
                images.append(image)
                # NOTE(review): K, D, c2w are None for cameras missing from the
                # calibration file — confirm point_cloud_from_depth_c2w tolerates that.
                pc, colors, _ = point_cloud_from_depth_c2w(color_image, depth_image, K, D, c2w)
                pc_list.append(pc)
                color_list.append(colors)

        frame_count += 1
        if images:
            # Tile all previews into one grid and show it at half size.
            stacked_images = stitch_images(images, 4)
            show_image = cv2.resize(stacked_images, (stacked_images.shape[1] // 2, stacked_images.shape[0] // 2))
            cv2.imshow("All Cameras", show_image)
            all_pc = np.concatenate(pc_list, axis=0)
            # Keep only points whose world z lies in (-2, -0.05).
            all_pc = all_pc[(all_pc[:, 2] > -2) & (all_pc[:, 2] < -0.05)]
            all_colors = np.concatenate(color_list, axis=0)
            # Replace the previous actor with a fresh Points object, then re-render.
            plt.remove(cloud)
            cloud = Points(all_pc, c='lime', r=1)
            plt.add(cloud)
            plt.render()

        # Esc (keycode 27) exits the loop.
        if cv2.waitKey(1) & 0xFF == 27:
            break

except KeyboardInterrupt:
    print("Stopping stream...")

finally:
    # BUG FIX: each entry of `pipelines` is the 6-tuple
    # (pipeline, clipping_distance, align, K, D, c2w); the previous
    # `for pipeline,_,_ in pipelines:` raised ValueError here and left
    # every pipeline running. Unpack only the pipeline and discard the rest.
    for pipeline, *_ in pipelines:
        pipeline.stop()

    # Close all OpenCV windows and the vedo viewer.
    cv2.destroyAllWindows()
    plt.interactive().close()
