import pyrealsense2 as rs
import numpy as np
import cv2
import os
import time
import multiprocessing as mp
import math
import numpy as np

def stitch_images(images, max_cols=4, padding=0, pad_value=0):
    """Tile a list of equally-sized numpy images into one grid image.

    Tiles are placed row by row, at most ``max_cols`` per row. Grayscale
    and single-channel inputs are promoted to 3 channels and everything
    is cast to uint8 before placement.

    Args:
        images: list[np.ndarray], each of shape (H, W) or (H, W, C).
        max_cols: maximum number of tiles per row (default 4).
        padding: gap in pixels between neighbouring tiles.
        pad_value: fill value (0-255) used for the gap areas.

    Returns:
        np.ndarray: the stitched grid of shape (out_h, out_w, 3), uint8.

    Raises:
        ValueError: if ``images`` is empty or the sizes are inconsistent.
    """
    if not images:
        raise ValueError("images 列表为空")

    # All tiles must share the spatial size of the first image.
    height, width = images[0].shape[:2]
    for idx, img in enumerate(images):
        if img.shape[:2] != (height, width):
            raise ValueError(f"第 {idx} 张图片尺寸 {img.shape[:2]} 与第一张 {height,width} 不一致")

    # Promote grayscale / single-channel images to 3 channels.
    def _as_rgb(img):
        if img.ndim == 2:
            return np.repeat(img[:, :, None], 3, axis=2)
        if img.ndim == 3 and img.shape[2] == 1:
            return np.repeat(img, 3, axis=2)
        return img

    tiles = [_as_rgb(img).astype(np.uint8) for img in images]

    count = len(tiles)
    ncols = min(max_cols, count)
    nrows = -(-count // ncols)  # ceiling division

    canvas = np.full(
        (nrows * height + (nrows - 1) * padding,
         ncols * width + (ncols - 1) * padding,
         3),
        pad_value,
        dtype=np.uint8,
    )

    # Drop each tile into its grid cell, skipping `padding` pixels between cells.
    for pos, tile in enumerate(tiles):
        top = (pos // ncols) * (height + padding)
        left = (pos % ncols) * (width + padding)
        canvas[top:top + height, left:left + width, :] = tile

    return canvas

def worker(shared_value, lock, pipeline, idx):
    """Decrement the shared counter by one while holding ``lock``.

    ``pipeline`` and ``idx`` are accepted for interface compatibility with
    the multiprocessing setup but are not used by the current body.
    """
    with lock:  # serialize access to the shared counter across processes
        print(f"进程 {os.getpid()} 运行, 当前值 = {shared_value.value}")
        shared_value.value = shared_value.value - 1
        print(f"进程 {os.getpid()} 修改后, 当前值 = {shared_value.value}")
    # Brief pause so interleaving between workers is observable.
    time.sleep(0.5)


# Root directory under which one sub-folder per camera is created.
target = "image"

# Enumerate all connected RealSense devices.
context = rs.context()
devices = context.query_devices()

# Report how many cameras were found.
print(f"Found {len(devices)} devices")

# One (pipeline, clipping_distance, align) tuple per camera.
pipelines = []

# Configure and start a streaming pipeline for every camera.
for i, device in enumerate(devices):
    # Identify the device being opened.
    print(f"Opening device {i}: {device.get_info(rs.camera_info.name)}")
    
    # Each camera gets its own pipeline.
    pipeline = rs.pipeline()
    
    # Select this specific camera and enable its streams.
    config = rs.config()
    config.enable_device(device.get_info(rs.camera_info.serial_number))  # serial number uniquely identifies the camera
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)  # color stream (change resolution/format as needed)
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    # Output folder for this camera's frames: image/<i>/
    os.makedirs(os.path.join(target, f"{i}"), exist_ok=True)
    # Start streaming.
    profile = pipeline.start(config)
    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: " , depth_scale)

    # We will be removing the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 1 #1 meter
    # Convert meters to raw z16 depth units via the sensor's depth scale.
    clipping_distance = clipping_distance_in_meters / depth_scale
    # Align depth frames to the color stream so pixels correspond 1:1.
    align_to = rs.stream.color
    align = rs.align(align_to)
    # Keep everything the capture loop needs for this camera.
    pipelines.append((pipeline, clipping_distance, align))

frame_count = 0
# Per-camera list of captured (background-removed) frames, saved to disk at the end.
cam_images = [[] for _ in range(len(devices))]
try:
    while True: 
        
        for i, (pipeline, clipping_distance, align) in enumerate(pipelines):
            # Block until a coherent frameset arrives, then align depth to color.
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            
            if color_frame and aligned_depth_frame:
                # Convert frames to NumPy arrays.
                color_image = np.asanyarray(color_frame.get_data())
                depth_image = np.asanyarray(aligned_depth_frame.get_data())
                grey_color = 0
                # Replicate depth to 3 channels so it matches the color image shape.
                depth_image_3d = np.dstack((depth_image,depth_image,depth_image))
                # Black out pixels farther than the clipping distance or with invalid (<= 0) depth.
                bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
                bg_removed = cv2.rotate( bg_removed, cv2.ROTATE_90_COUNTERCLOCKWISE)
                # color_image = cv2.rotate(color_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
                cam_images[i].append(bg_removed)

            # break
        frame_count += 1
        # if images:
        #     # Stack all camera images into one preview grid
        #     # (assumes identical sizes; tiled with stitch_images)
        #     stacked_images = stitch_images(images, 4)
        #     show_image = cv2.resize(stacked_images,(stacked_images.shape[1]//4, stacked_images.shape[0] //4))
        #     # Show the combined preview image
        #     cv2.imshow("All Cameras", show_image)
        
        
        # Stop after a fixed number of capture iterations.
        # NOTE(review): no key is polled here despite the old Esc-key comment —
        # the loop simply ends after 100 iterations (or Ctrl-C).
        if frame_count == 100:
            break
        
    # Dump every captured frame to disk as image/<camera>/frame_XXXX.png.
    for i , images in enumerate(cam_images):
        for j, image in enumerate(images):
            cv2.imwrite(os.path.join(target, str(i), f"frame_{j:04d}.png"), image)

except KeyboardInterrupt:
    print("Stopping stream...")

finally:
    # Stop every camera pipeline.
    for pipeline,_,_ in pipelines:
        pipeline.stop()
    
    # Close any OpenCV windows.
    cv2.destroyAllWindows()
