import os
import re
import sys
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Any, Tuple

import cv2
import numpy as np
import matplotlib.pyplot as plt
from jsonargparse import ArgumentParser
from tqdm import tqdm

from mcap_base_reader import MCAPBaseReader, SyncedMCAPData


@dataclass(frozen=True)
class ConversionConfig:
    """Configuration for batch-converting MCAP files to MP4 videos and plots.

    NOTE: ``task_name``, ``input_dir`` and ``output_dir`` are declared as
    ``str`` so jsonargparse can parse them from the CLI, but ``__post_init__``
    converts ``input_dir``/``output_dir`` to ``Path`` and splits ``task_name``
    (whitespace-separated) into a list of task names.
    """

    task_name: str = field(default_factory=str)   # space-separated task names
    input_dir: str = field(default_factory=str)   # root dir with one sub-dir per task
    output_dir: str = field(default_factory=str)  # root dir for generated videos/plots
    # os.cpu_count() may return None (per its documentation); guard so the
    # default is always a positive int. Original `os.cpu_count() // 2 or 1`
    # raised TypeError in that case.
    max_workers: int = max(1, (os.cpu_count() or 2) // 2)
    fps: int = 20  # frame rate of the output MP4s

    def __post_init__(self):
        # The dataclass is frozen, so mutate via object.__setattr__.
        object.__setattr__(self, "input_dir", Path(self.input_dir))
        object.__setattr__(self, "output_dir", Path(self.output_dir))
        if isinstance(self.task_name, str):
            object.__setattr__(self, "task_name", self.task_name.split())


class MCAPConverter(MCAPBaseReader):
    """Convert a single MCAP file.

    Pulls synchronized data from the base class and saves it as one
    horizontally-stitched MP4 video plus qpos/gpos curve plots.
    """

    def __init__(
        self, mcap_file: Path, output_base_path: Path, config: ConversionConfig
    ):
        super().__init__(mcap_file)
        # output_base_path is e.g. <out>/<task>/episode_<n>; its parent and
        # name are combined with per-artifact sub-dirs below.
        self.output_base_path = output_base_path
        self.config = config

    def convert(self) -> Dict[str, Any]:
        """Run the conversion pipeline and return a result dict.

        Returns:
            Dict with keys ``input_file``, ``output_files`` (videos),
            ``plot_files``, ``success`` and ``error``. Never raises: all
            exceptions are caught and reported through ``error``.
        """
        result = {
            "input_file": str(self.mcap_file),
            "output_files": [],
            "plot_files": [],
            "success": False,
            "error": None,
        }
        try:
            # 1. Get synchronized data from the base class.
            synced_data = self.read_and_sync_data()

            if synced_data is None:
                result["error"] = "未找到有效的关节或图像数据（或同步失败）"
                print(
                    f"[!] 警告: 文件 {self.mcap_file.name} 因'{result['error']}'被跳过。"
                )
                return result

            # 2. Save the video.
            # synced_data.images format: {"left_camera": [T,H,W,C], ...}
            result["output_files"] = self._create_videos(synced_data.images)

            # 3. Create and save one plot each for qpos and gpos (only when
            # both the observation and action arrays are present).
            for plot_type, obs, act in (
                ("qpos", synced_data.qpos_obs, synced_data.qpos_act),
                ("gpos", synced_data.gpos_obs, synced_data.gpos_act),
            ):
                if obs is None or act is None:
                    continue
                plot_path = (
                    self.output_base_path.parent
                    / f"{plot_type}_plots"
                    / f"{self.output_base_path.name}.png"
                )
                self._plot_and_save_data(obs, act, plot_path, plot_type=plot_type)
                result["plot_files"].append(str(plot_path))

            result["success"] = True
            return result
        except Exception as e:
            result["error"] = str(e)
            print(f"[!] 错误: 转换文件 {self.mcap_file.name} 时发生异常: {e}")
            return result

    def _create_videos(self, image_datasets: Dict[str, List[np.ndarray]]) -> List[str]:
        """Stitch per-camera frame lists side by side into one MP4.

        Args:
            image_datasets: {"left_camera": [T,H,W,C] RGB frames, ...}.

        Returns:
            List containing the created video path, or [] when a camera is
            missing or there are no frames.
        """
        # Collect frames in HDF5 order (cam_left, cam_head, cam_right), as
        # given by the base class CAMERA_MAPPING
        # (hdf5 name -> mcap topic key, e.g. "cam_left" -> "left_camera").
        frames_per_cam = {}
        for hdf5_cam_name, mcap_cam_key in self.CAMERA_MAPPING.items():
            if mcap_cam_key not in image_datasets:
                print(f"[!] 警告: 缺少相机 {mcap_cam_key} 的数据，无法拼接视频。")
                return []
            frames_per_cam[hdf5_cam_name] = image_datasets[mcap_cam_key]

        # Use the shortest stream so a camera with fewer frames cannot cause
        # an IndexError while stitching (the original trusted only the first
        # camera's length).
        num_frames = min(len(frames) for frames in frames_per_cam.values())
        if num_frames == 0:
            return []

        # Assumes all cameras share one frame size — TODO confirm upstream.
        first_frame = frames_per_cam[self.camera_names[0]][0]
        height, width, _ = first_frame.shape
        combined_width = width * len(self.camera_names)

        output_dir = self.output_base_path.parent / "combined_video"
        output_dir.mkdir(parents=True, exist_ok=True)
        output_file = output_dir / f"{self.output_base_path.name}.mp4"

        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        writer = cv2.VideoWriter(
            str(output_file), fourcc, self.config.fps, (combined_width, height)
        )
        try:
            for i in range(num_frames):
                # Stitch in self.camera_names order (cam_left, cam_head, cam_right).
                frames_to_stack = [
                    frames_per_cam[cam_name][i] for cam_name in self.camera_names
                ]
                combined_frame = np.hstack(frames_to_stack)
                # Frames arrive as RGB; OpenCV writes BGR.
                writer.write(cv2.cvtColor(combined_frame, cv2.COLOR_RGB2BGR))
        finally:
            # Always release so the MP4 container is finalized even if an
            # encode step raises mid-loop.
            writer.release()
        return [str(output_file)]

    @staticmethod
    def _build_titles(plot_type: str, num_dims: int) -> List[str]:
        """Per-dimension subplot titles for a given plot type."""
        titles: List[str] = []
        if plot_type == "qpos":
            if num_dims == 14:
                titles.extend([f"L Arm qpos Dim {i}" for i in range(6)])
                titles.append("L EEF qpos (Gripper)")
                titles.extend([f"R Arm qpos Dim {i}" for i in range(6)])
                titles.append("R EEF qpos (Gripper)")
            else:
                # Fall back to generic titles when the dimension count is
                # not the expected 14.
                print(
                    f"[*] 警告: 预期 qpos 维度为 14，实际为 {num_dims}。使用通用标题。"
                )
                titles = [f"qpos Dim {i}" for i in range(num_dims)]
        elif plot_type == "gpos":
            # NOTE: must stay in the exact dimension order produced by
            # _get_concatenated_gpos in the base class.
            gpos_dim_map = ["pos_x", "pos_y", "pos_z", "roll", "pitch", "yaw", "eef"]
            for side in ("L", "R"):
                for dim_name in gpos_dim_map:
                    titles.append(f"{side} Arm {dim_name}")
        return titles

    def _plot_and_save_data(
        self,
        observation_data: np.ndarray,
        action_data: np.ndarray,
        output_path: Path,
        plot_type: str,
    ):
        """Plot observation vs. action curves, one subplot per dimension,
        and save the figure as a PNG.

        Args:
            observation_data: [T, D] array of observed values.
            action_data: [T, D] array of commanded values.
            output_path: destination PNG path (parents created as needed).
            plot_type: "qpos" or "gpos"; selects the subplot titles.
        """
        if observation_data.size == 0 or action_data.size == 0:
            print(f"[*] 警告: {plot_type.upper()} 数据为空，跳过绘图。")
            return

        num_dims = observation_data.shape[1]
        ncols = 2
        nrows = (num_dims + ncols - 1) // ncols  # ceiling division
        fig, axes = plt.subplots(
            nrows, ncols, figsize=(12, nrows * 3.5), constrained_layout=True
        )
        fig.suptitle(f"{plot_type.upper()} Data: Observation vs. Action", fontsize=16)
        axes = axes.flatten()

        titles = self._build_titles(plot_type, num_dims)

        for i in range(num_dims):
            ax = axes[i]
            ax.plot(observation_data[:, i], label="observation")
            ax.plot(action_data[:, i], label="action", linestyle="--")
            ax.set_title(titles[i] if i < len(titles) else f"Dim {i}")
            ax.set_xlabel("Timestep")
            ax.set_ylabel("Value")
            ax.legend()
            ax.grid(True)

        # Delete unused trailing axes from the grid.
        for i in range(num_dims, len(axes)):
            fig.delaxes(axes[i])

        output_path.parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(output_path, dpi=150)
        plt.close(fig)


def convert_file_worker(
    mcap_file: Path, output_base_path: Path, config: ConversionConfig
) -> Dict:
    """Process-pool entry point: convert one MCAP file, return its result dict."""
    return MCAPConverter(mcap_file, output_base_path, config).convert()


def main():
    """CLI entry point: parse the config, discover MCAP files for each task,
    and convert them in a process pool."""
    parser = ArgumentParser()
    parser.add_class_arguments(ConversionConfig, as_group=False)
    args = parser.parse_args()
    config = ConversionConfig(**vars(args))

    print(
        f"输入目录: {config.input_dir}\n输出目录: {config.output_dir}\n任务: {config.task_name}"
    )

    jobs = []
    if not config.task_name:
        print("[!] 错误: 未指定任何 task_name。请使用 --task_name 提供任务名称。")
        return

    # Hoisted out of the task loop (it was re-defined every iteration).
    def extract_episode_index(file_path: Path) -> float:
        """First run of digits in the filename stem; files without digits
        sort last (NOTE(review): such a file would be named
        'episode_inf' — confirm digit-free stems cannot occur)."""
        match = re.search(r"(\d+)", file_path.stem)
        return int(match.group(1)) if match else float("inf")

    for task in config.task_name:
        task_input_dir = config.input_dir / task
        task_output_dir = config.output_dir / task
        if not task_input_dir.is_dir():
            print(
                f"[*] 警告: 任务 '{task}' 的输入目录 {task_input_dir} 不存在，已跳过。"
            )
            continue

        # Sort episodes numerically rather than lexicographically.
        mcap_files = sorted(task_input_dir.rglob("*.mcap"), key=extract_episode_index)
        for mcap_file in mcap_files:
            ep_index = extract_episode_index(mcap_file)
            output_base_path = task_output_dir / f"episode_{ep_index}"
            jobs.append((mcap_file, output_base_path, config))

    if not jobs:
        print("[!] 错误: 在指定的任务目录中未找到任何需要转换的 .mcap 文件。")
        return

    print(
        f"共找到 {len(jobs)} 个 MCAP 文件需要转换。使用 {config.max_workers} 个进程开始处理..."
    )

    with ProcessPoolExecutor(max_workers=config.max_workers) as executor:
        futures = {
            executor.submit(convert_file_worker, mcap, out_base, cfg): mcap
            for mcap, out_base, cfg in jobs
        }
        for future in tqdm(as_completed(futures), total=len(jobs), desc="转换所有文件"):
            try:
                # convert() swallows its own errors into the result dict, so a
                # raise here means a process-level failure (e.g. pickling or a
                # crashed worker). Log it and keep processing the remaining
                # files instead of aborting the whole batch.
                future.result()
            except Exception as e:
                print(f"[!] 错误: 处理 {futures[future].name} 时发生进程级异常: {e}")

    print("\n--- 转换完成 ---")
    print(f"所有文件已保存至: {config.output_dir.resolve()}")


# Script entry point: run the batch conversion only when executed directly.
if __name__ == "__main__":
    main()
