from ctypes import alignment
from moviepy import *
from turtle import color
import pyrealsense2 as rs
import numpy as np
import cv2
import os
import time
import multiprocessing as mp
import math
import imageio.v2 as imageio
from datetime import datetime

def save_video_from_dict_list(frame_list, output_path, codec="libx264"):
    """
    Save a video from a list of frame dicts, preserving the real capture timing.

    Parameters:
    - frame_list: list of dict, each dict must contain:
        - 'timestamp': str, formatted "%Y%m%d_%H%M%S_%f"
        - 'image': np.ndarray (RGB, as produced by `worker`)
    - output_path: str, path the video is written to
    - codec: str, video codec, default "libx264"

    Raises:
    - ValueError: if frame_list is empty.
    """
    if not frame_list:
        raise ValueError("frame_list 不能为空")

    # Split out timestamps and images.
    timestamps = [frame["timestamp"] for frame in frame_list]
    frames = [frame["image"] for frame in frame_list]

    # Parse timestamp strings into datetime objects
    # (%f accepts the millisecond-truncated stamps written by `worker`).
    times = [datetime.strptime(ts, "%Y%m%d_%H%M%S_%f") for ts in timestamps]

    # Per-frame duration (seconds) from consecutive timestamps.
    durations = [(t2 - t1).total_seconds() for t1, t2 in zip(times[:-1], times[1:])]
    if durations:
        durations.append(durations[-1])  # last frame reuses the previous duration
    else:
        durations.append(1 / 30)  # single frame: default to 1/30 s

    # Bug fix: clamp non-positive durations. Millisecond-truncated timestamps
    # at 30 fps can repeat (or arrive out of order), and a zero/negative frame
    # duration breaks the clip's timing/encoding.
    min_duration = 1 / 1000
    durations = [max(d, min_duration) for d in durations]

    # Build and encode the clip.
    clip = ImageSequenceClip(frames, durations=durations)
    clip.write_videofile(output_path, codec=codec)

def filter_frames_by_timestamp(frame_list, min_timestamp, max_timestamp):
    """
    Keep only the frames whose timestamp lies in [min_timestamp, max_timestamp].

    Parameters:
    - frame_list: list of dict, each containing 'timestamp' and 'image'
    - min_timestamp: str, "%Y%m%d_%H%M%S_%f"
    - max_timestamp: str, "%Y%m%d_%H%M%S_%f"

    Returns:
    - list of dict: the frames inside the inclusive time window, in order.
    """
    fmt = "%Y%m%d_%H%M%S_%f"
    lower = datetime.strptime(min_timestamp, fmt)
    upper = datetime.strptime(max_timestamp, fmt)

    def _in_window(frame):
        # Inclusive on both ends.
        return lower <= datetime.strptime(frame["timestamp"], fmt) <= upper

    return list(filter(_in_window, frame_list))

def worker(shared_array, devices_serial_number, idx, render_number, target):
    """
    Capture up to `render_number` color frames from one RealSense camera,
    save each frame as a PNG under `target/<idx>/`, and return the frames
    (converted to RGB) together with the camera index.

    Parameters:
    - shared_array: manager list used as a start barrier — each worker sets
      its slot to 1 and waits until every camera is ready before capturing.
    - devices_serial_number: list of serial-number strings, indexed by idx.
    - idx: int, index of this camera.
    - render_number: int, number of capture iterations to run.
    - target: str, root directory for the per-camera output folder.

    Returns:
    - (images, idx): images is a list of {"timestamp": str, "image": RGB ndarray}.
    """
    # Create a pipeline bound to this specific device.
    pipeline = rs.pipeline()

    # Configure the camera stream, selecting the camera by serial number.
    config = rs.config()
    config.enable_device(devices_serial_number[idx])
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

    save_path = os.path.join(target, f"{idx}")
    os.makedirs(save_path, exist_ok=True)
    current_render_number = 0
    images = []
    shared_array[idx] = 1
    pipeline.start(config)
    # Barrier: wait until every worker has flagged itself ready so all
    # cameras start capturing at (roughly) the same moment.
    while True:
        if all(x == 1 for x in shared_array[:]):
            break
        time.sleep(0.1)
    print(f"camera {idx} start capture!")

    # Capture loop.
    while current_render_number < render_number:
        frames = pipeline.wait_for_frames()
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]  # millisecond precision
        image = frames.get_color_frame()
        current_render_number += 1
        # Bug fix: the original appended unconditionally, which raised
        # UnboundLocalError on `color_image` if the very first frame was
        # missing, and silently re-appended a stale frame under a fresh
        # timestamp on later misses. Skip missing frames instead.
        if not image:
            continue
        color_image = np.asanyarray(image.get_data())
        color_image = cv2.rotate(color_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
        images.append({"timestamp": timestamp, "image": color_image})
    pipeline.stop()
    print(f"camera {idx} finish!")

    # Persist the BGR frames to disk, then convert in place to RGB for the
    # video-writing step performed by the caller.
    for image_dict in images:
        timestamp = image_dict["timestamp"]
        color_image = image_dict["image"]

        cv2.imwrite(os.path.join(save_path, f"frame_{timestamp}.png"), color_image)
        image_dict["image"] = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
    return (images, idx)





if __name__ == "__main__":
    render_number = 300
    target = "dataset\\wz_1"

    # Enumerate all connected RealSense devices.
    context = rs.context()
    devices = context.query_devices()
    print(f"Found {len(devices)} devices")

    # Collect each camera's serial number so workers can bind to a specific device.
    devices_serial_number = []
    for i, device in enumerate(devices):
        print(f"Opening device {i}: {device.get_info(rs.camera_info.name)}  {device.get_info(rs.camera_info.serial_number)}")
        devices_serial_number.append(device.get_info(rs.camera_info.serial_number))

    # Shared ready-flags used as a start barrier across the worker processes.
    manager = mp.Manager()
    shared_array = manager.list([0] * len(devices))

    # One worker process per camera; each returns (frames, camera_index).
    args = [(shared_array, devices_serial_number, i, render_number, target) for i in range(len(devices))]
    with mp.Pool(len(devices)) as pool:
        results = pool.starmap(worker, args)

    # Compute the overlapping capture window shared by all cameras:
    # `min_timestamp` ends up as the LATEST first-frame timestamp and
    # `max_timestamp` as the EARLIEST last-frame timestamp, so every camera
    # has footage across [min_timestamp, max_timestamp]. Zero-padded
    # "%Y%m%d_%H%M%S_%f" strings compare in chronological order, so plain
    # string comparison is safe here.
    min_timestamp = None
    max_timestamp = None
    total_images = []
    for images, idx in results:
        if not images:
            # A camera that captured nothing cannot constrain the window.
            continue
        if min_timestamp is None or min_timestamp < images[0]["timestamp"]:
            min_timestamp = images[0]["timestamp"]
        if max_timestamp is None or max_timestamp > images[-1]["timestamp"]:
            max_timestamp = images[-1]["timestamp"]
        total_images.append((images, idx))
    print(f"start_time: {min_timestamp} end_time: {max_timestamp}")

    # Trim every camera's stream to the common window and write one video each.
    for images, idx in total_images:
        images = filter_frames_by_timestamp(images, min_timestamp, max_timestamp)
        save_path = os.path.join(target, f"{idx}.mp4")
        save_video_from_dict_list(images, save_path)