import glob
import os.path

import av
import json

import cv2
import torch
import numpy as np

from dataclasses import dataclass
from typing import Iterator, List
from ..utils.file import fs as fs
from ..camera.camera import CameraModel, read_camera_from_json, PinholePlaneCameraModel, NoDistortion
from ..hand.hand import HandModel
from ..hand.handpose import SingleHandPose

from ..pipelines.track.tracker import InputFrame, ViewData


@dataclass
class HandPoseLabels:
    """Ground-truth labels for one hand-pose sequence.

    Array fields are indexed by frame along axis 0 (and, where applicable,
    by hand along axis 1 — see how ``SyncedImagePoseStream`` consumes them).
    """

    # One calibrated camera model per view.
    cameras: List[CameraModel]
    # Per-camera mounting angle, parallel to ``cameras``.
    camera_angles: List[float]
    # Per-frame, per-camera camera-to-world transforms.
    camera_to_world_transforms: np.ndarray
    # Hand shape model for this sequence.
    hand_model: HandModel
    # Per-frame, per-hand joint angles.
    joint_angles: np.ndarray
    # Per-frame, per-hand wrist transforms.
    wrist_transforms: np.ndarray
    # Per-frame, per-hand confidences; 0 marks an untracked hand.
    hand_confidences: np.ndarray

    def __len__(self):
        # Number of labeled frames.
        return len(self.joint_angles)


class VideoStream:
    """Iterates monochrome frames from the first video stream of a file.

    The container is opened lazily on each ``__len__``/``__iter__`` call and
    closed deterministically via a context manager (the original leaked the
    open container until garbage collection).
    """

    def __init__(self, data_path: str):
        self._data_path = data_path

    def __len__(self) -> int:
        # Open, read the frame count from the first video stream, and close.
        with av.open(self._data_path) as container:
            return container.streams.video[0].frames

    def __iter__(self) -> Iterator[np.ndarray]:
        with av.open(self._data_path) as container:
            # Take the first video stream.
            stream = container.streams.video[0]
            print(f"Opened ({int(stream.average_rate)} fps) video from {self._data_path}")

            for frame in container.decode(stream):
                # Frames are monochrome: keep only the first channel.
                yield np.array(frame.to_image())[..., 0]


def _load_json(p: str):
    """Open *p* through the project filesystem layer and parse it as JSON."""
    with fs.open(p, "rb") as handle:
        return json.load(handle)


def load_hand_model_from_dict(hand_model_dict) -> HandModel:
    """Build a HandModel from a plain dict, converting each entry to a tensor.

    List values become 1-D tensors; scalar values are wrapped as 1-element
    tensors so every HandModel field is a ``torch.Tensor``.
    """
    tensors = {
        key: torch.Tensor(value) if isinstance(value, list) else torch.Tensor((value,))
        for key, value in hand_model_dict.items()
    }
    return HandModel(**tensors)


def _load_hand_pose_labels(p: str) -> HandPoseLabels:
    """Parse the label JSON at *p* into a ``HandPoseLabels`` record."""
    raw = _load_json(p)
    return HandPoseLabels(
        cameras=[read_camera_from_json(entry) for entry in raw["cameras"]],
        camera_angles=raw["camera_angles"],
        camera_to_world_transforms=np.array(raw["camera_to_world_transforms"]),
        hand_model=load_hand_model_from_dict(raw["hand_model"]),
        joint_angles=np.array(raw["joint_angles"]),
        wrist_transforms=np.array(raw["wrist_transforms"]),
        hand_confidences=np.array(raw["hand_confidences"]),
    )


class SyncedImagePoseStream:
    """Pairs decoded video frames with their ground-truth hand poses.

    The label JSON is expected next to the video file with the same basename
    (e.g. ``clip.mp4`` -> ``clip.json``). Yields ``(InputFrame, gt_tracking)``
    where ``gt_tracking`` maps hand index to a ``SingleHandPose``.
    """

    def __init__(self, data_path: str):
        # Swap the extension via splitext instead of slicing a fixed number
        # of characters, so extensions of any length work.
        label_path = os.path.splitext(data_path)[0] + ".json"
        self._hand_pose_labels = _load_hand_pose_labels(label_path)
        self._image_stream = VideoStream(data_path)
        assert len(self._hand_pose_labels) == len(self._image_stream)

    def __len__(self) -> int:
        return len(self._image_stream)

    def __iter__(self):
        for frame_idx, raw_mono in enumerate(self._image_stream):
            # Collect ground truth for each hand with nonzero confidence.
            gt_tracking = {}
            for hand_idx in range(0, 2):
                if self._hand_pose_labels.hand_confidences[frame_idx, hand_idx] > 0:
                    gt_tracking[hand_idx] = SingleHandPose(
                        joint_angles=self._hand_pose_labels.joint_angles[
                            frame_idx, hand_idx
                        ],
                        wrist_xform=self._hand_pose_labels.wrist_transforms[
                            frame_idx, hand_idx
                        ],
                        hand_confidence=self._hand_pose_labels.hand_confidences[
                            frame_idx, hand_idx
                        ],
                    )

            # The decoded frame packs all camera views side by side; split it
            # into (rows, num_cameras, cols_per_view).
            multi_view_images = raw_mono.reshape(
                raw_mono.shape[0], len(self._hand_pose_labels.cameras), -1
            )

            # An all-zero camera-to-world transform marks an untracked frame.
            invalid_camera_to_world = (
                self._hand_pose_labels.camera_to_world_transforms[frame_idx].sum() == 0
            )
            if invalid_camera_to_world:
                assert (
                    not gt_tracking
                ), "Cameras are not tracked, expecting no ground truth tracking!"

            views = []
            for cam_idx in range(0, len(self._hand_pose_labels.cameras)):
                # Rebind each camera model to this frame's extrinsics.
                cur_camera = self._hand_pose_labels.cameras[cam_idx].copy(
                    camera_to_world_xf=self._hand_pose_labels.camera_to_world_transforms[
                        frame_idx, cam_idx
                    ],
                )

                views.append(
                    ViewData(
                        image=multi_view_images[:, cam_idx, :],
                        camera=cur_camera,
                        camera_angle=self._hand_pose_labels.camera_angles[cam_idx],
                    )
                )

            input_frame = InputFrame(views=views)
            yield input_frame, gt_tracking


def sort_json_files_by_timestamp(folder_path):
    """Sort the JSON files in *folder_path* by the timestamp embedded in each
    file's ``Camera0.image_name`` field.

    Args:
        folder_path: Directory containing the JSON files.

    Returns:
        List of JSON basenames in ascending timestamp order. Unreadable or
        malformed files are skipped with a warning instead of aborting
        (the original let ``OSError`` from ``open`` escape).
    """
    json_files = glob.glob(os.path.join(folder_path, "*.json"))

    # (basename, timestamp) pairs for every parseable file.
    file_timestamp_pairs = []
    for file_path in json_files:
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            # image_name looks like "<timestamp>.jpg"; strip the extension
            # and parse the integer timestamp.
            image_name = data.get("Camera0", {}).get("image_name", "")
            timestamp = int(image_name.split(".")[0])
        except (OSError, json.JSONDecodeError, KeyError, ValueError, IndexError) as e:
            print(f"处理文件 {file_path} 时出错: {e}")
            continue
        file_timestamp_pairs.append((os.path.basename(file_path), timestamp))

    file_timestamp_pairs.sort(key=lambda pair: pair[1])
    return [name for name, _ in file_timestamp_pairs]


class YVRImagePoseStream:
    """Streams multi-view frames described by per-frame JSON files under
    ``<data_path>/json_file``.

    Yields ``(InputFrame, gt_tracking)`` pairs; ``gt_tracking`` holds the
    frame's ``imu_xyz`` array under key 1.
    """

    def __init__(self, data_path: str, time_order=False):
        self.data_path = data_path
        self.json_path = os.path.join(data_path, "json_file")
        self.json_files = os.listdir(self.json_path)
        if time_order:
            # Optionally reorder frames by the timestamp in Camera0.image_name.
            self.json_files = sort_json_files_by_timestamp(self.json_path)

        # Fixed per-camera angles for the 6 views — presumably degrees;
        # TODO confirm units against the tracker.
        self.camera_angles = [15, -145, 145, -15, -140, -40]
        # self.hand_model = load_hand_model_from_dict(_load_json(hand_model_path))

    def __len__(self) -> int:
        return len(self.json_files)

    def __iter__(self):
        for frame_idx, js_file in enumerate(self.json_files):
            js_data = _load_json(os.path.join(self.json_path, js_file))
            gt_tracking = {}
            multi_view_images = []
            all_cameras = []

            for key, value in js_data.items():
                if key == "imu_xyz":
                    # IMU position goes into gt_tracking under key 1.
                    gt_tracking[1] = np.array(value)
                else:
                    # Any other key names a camera folder containing the image.
                    camera_name = key
                    image_name = value["image_name"]
                    image_path = os.path.join(self.data_path, camera_name, image_name)
                    # NOTE(review): cv2.imread returns None for missing or
                    # unreadable files, which makes the [..., 0] index raise.
                    raw_image = cv2.imread(image_path)[..., 0]
                    multi_view_images.append(raw_image)

                    # Extrinsics arrive without the homogeneous row; append
                    # [0, 0, 0, 1] to obtain a 4x4 transform.
                    cam_to_imu_extrinsic = np.array(value["cam_to_imu_extrinsic"])
                    cam_to_imu_extrinsic = np.vstack([cam_to_imu_extrinsic, [0, 0, 0, 1]])
                    intrinsic = value["intrinsic"]
                    fx, fy = intrinsic[0][0], intrinsic[1][1]
                    cx, cy = intrinsic[0][2], intrinsic[1][2]

                    camera = PinholePlaneCameraModel(
                        width=raw_image.shape[1],  # image width
                        height=raw_image.shape[0],  # image height
                        f=(fx, fy),  # focal lengths
                        c=(cx, cy),  # principal point
                        distort_coeffs=NoDistortion(),  # no distortion correction
                        camera_to_world_xf=cam_to_imu_extrinsic  # camera extrinsics
                    )
                    all_cameras.append(camera)
            multi_view_images = np.stack(multi_view_images)

            views = []
            # NOTE(review): assumes every JSON file lists exactly 6 cameras;
            # fewer entries would raise IndexError below.
            for cam_idx in range(0, 6):
                cur_camera = all_cameras[cam_idx]
                views.append(
                    ViewData(
                        image=multi_view_images[cam_idx, :, :],
                        camera=cur_camera,
                        camera_angle=self.camera_angles[cam_idx]
                    )
                )

            input_frame = InputFrame(views=views)
            yield input_frame, gt_tracking


# Expected image geometry and stream layout for YVRDetectImageStream.
IMAGE_WIDTH = 640
IMAGE_HEIGHT = 480
NUM_CAMERAS = 6  # number of cameras
NUM_HANDS = 2


class YVRDetectImageStream:
    """Streams multi-view hand-detection frames from per-frame JSON files
    under ``<data_path>/JsonFiles``.

    Each JSON file references one image per camera (``CAM<i>_IMG_PATH``),
    pinhole intrinsics (``CAM<i>_PINHOLE``), extrinsics relative to Camera0
    (``CAM<i>_RCC``/``_TCC``), and per-hand detection windows. Yields
    ``(InputFrame, gt_tracking)`` where ``gt_tracking`` maps camera index to
    a per-view sample dict.
    """

    def __init__(self, data_path: str):
        self.data_path = data_path
        self.json_path = os.path.join(data_path, "JsonFiles")
        self.json_files = os.listdir(self.json_path)

        # Fixed per-camera angles for the 6 views — presumably degrees;
        # TODO confirm units against the tracker.
        self.camera_angles = [15, -145, 145, -15, -140, -40]
        # self.hand_model = load_hand_model_from_dict(_load_json(hand_model_path))

    def __len__(self) -> int:
        return len(self.json_files)

    def build_intrinsics_matrix(self, pinhole_data):
        """Build a 3x3 intrinsics matrix from pinhole fx/fy/cx/cy values."""
        intrinsics = np.eye(3, dtype=np.float32)
        intrinsics[0, 0] = pinhole_data['fx']
        intrinsics[1, 1] = pinhole_data['fy']
        intrinsics[0, 2] = pinhole_data['cx']
        intrinsics[1, 2] = pinhole_data['cy']
        return intrinsics

    def build_extrinsics_matrix(self, rotation, translation):
        """Build a 4x4 homogeneous transform from a flat 3x3 rotation and a
        3-vector translation."""
        extrinsics = np.eye(4, dtype=np.float32)
        extrinsics[:3, :3] = np.array(rotation).reshape(3, 3)
        extrinsics[:3, 3] = np.array(translation)
        return extrinsics

    def rcc_to_ric(self, rcc, tcc, ric0, tic0):
        """Convert extrinsics relative to Camera0 (RCC/TCC) into extrinsics
        relative to the IMU (RIC/TIC).

        Composition: ``T_IC = T_I0 @ T_0C`` where T_I0 is Camera0's
        IMU-relative transform (ric0, tic0) and T_0C is the current camera's
        Camera0-relative transform (rcc, tcc).

        Returns:
            ``(ric, tic)`` as flat lists (9 rotation values, 3 translation
            values).
        """
        T_I0 = self.build_extrinsics_matrix(ric0, tic0)
        T_0C = self.build_extrinsics_matrix(rcc, tcc)
        T_IC = np.matmul(T_I0, T_0C)
        return T_IC[:3, :3].flatten().tolist(), T_IC[:3, 3].tolist()

    def __iter__(self):
        for js_file in self.json_files:
            data = _load_json(os.path.join(self.json_path, js_file))
            gt_tracking = {}
            multi_view_images = []
            all_cameras = []
            # Indices of the cameras actually loaded, parallel to all_cameras.
            loaded_cam_indices = []

            # Camera0's IMU-relative pose is the reference for every camera.
            if 'CAM0_RIC' not in data or 'CAM0_TIC' not in data:
                continue
            ric0 = data['CAM0_RIC']
            tic0 = data['CAM0_TIC']

            for camera_idx in range(NUM_CAMERAS):
                cam_key = f"CAM{camera_idx}"
                if cam_key not in data:
                    continue
                cam_data = data[cam_key]

                img_path_key = f"{cam_key}_IMG_PATH"
                if img_path_key not in data:
                    continue
                image_file = os.path.join(self.data_path, data[img_path_key])
                if not os.path.exists(image_file):
                    print(f"Warning: Image file not found: {image_file}")
                    continue

                # Read as grayscale; None signals a decode failure.
                image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
                if image is None:
                    print(f"Warning: Failed to read image: {image_file}")
                    continue
                if image.shape != (IMAGE_HEIGHT, IMAGE_WIDTH):
                    image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT))

                # Per-hand detection windows; row 0 = LEFT, row 1 = RIGHT.
                hand_centers = np.zeros((NUM_HANDS, 2), dtype=np.float32)
                hand_scales = np.ones((NUM_HANDS, 2), dtype=np.float32)
                hand_confidences = np.zeros((NUM_HANDS, 1), dtype=np.float32)
                for hand_idx, hand_key in enumerate(('LEFT', 'RIGHT')):
                    if hand_key in cam_data and cam_data[hand_key]['confidence'] > 0:
                        hand_data = cam_data[hand_key]
                        # Centers are normalized; scale to pixel coordinates.
                        hand_centers[hand_idx, 0] = hand_data['center_x'] * IMAGE_WIDTH
                        hand_centers[hand_idx, 1] = hand_data['center_y'] * IMAGE_HEIGHT
                        hand_scales[hand_idx, 0] = hand_data['window_width']
                        hand_scales[hand_idx, 1] = hand_data['window_height']
                        hand_confidences[hand_idx, 0] = hand_data['confidence']

                pinhole_key = f"{cam_key}_PINHOLE"
                if pinhole_key not in data:
                    continue
                intrinsics = self.build_intrinsics_matrix(data[pinhole_key])

                if camera_idx == 0:
                    # Camera0 uses its RIC/TIC directly.
                    ric, tic = ric0, tic0
                else:
                    # Other cameras are given relative to Camera0; convert to
                    # IMU-relative extrinsics.
                    rcc_key = f"{cam_key}_RCC"
                    tcc_key = f"{cam_key}_TCC"
                    if rcc_key not in data or tcc_key not in data:
                        continue
                    ric, tic = self.rcc_to_ric(data[rcc_key], data[tcc_key], ric0, tic0)
                extrinsics = self.build_extrinsics_matrix(ric, tic)

                # Keep the uint8 image directly (with the leading channel axis
                # the old code produced via unsqueeze). The previous
                # x/255.0 -> tensor -> *255 -> uint8 round-trip could perturb
                # pixel values by one through float truncation.
                sample = {
                    'image': image[np.newaxis, ...],
                    'centers': hand_centers,
                    'scales': hand_scales,
                    'confidences': hand_confidences,
                    'view_index': np.array([camera_idx], dtype=np.uint8),
                    'frame_index': np.array([cam_data.get('FRAME_ID', 0)], dtype=np.uint32),
                    'intrinsics': intrinsics,
                    'extrinsics': extrinsics,
                }
                gt_tracking[camera_idx] = sample
                multi_view_images.append(image)

                fx, fy = intrinsics[0][0], intrinsics[1][1]
                cx, cy = intrinsics[0][2], intrinsics[1][2]
                all_cameras.append(
                    PinholePlaneCameraModel(
                        width=image.shape[1],  # image width
                        height=image.shape[0],  # image height
                        f=(fx, fy),  # focal lengths
                        c=(cx, cy),  # principal point
                        distort_coeffs=NoDistortion(),  # no distortion correction
                        camera_to_world_xf=extrinsics,  # camera extrinsics
                    )
                )
                loaded_cam_indices.append(camera_idx)

            if not all_cameras:
                # No usable camera data this frame; np.stack([]) would raise.
                continue
            multi_view_images = np.stack(multi_view_images)

            # Iterate only the views actually loaded: the original hard-coded
            # range(6) raised IndexError whenever a camera was skipped above.
            views = [
                ViewData(
                    image=multi_view_images[view_pos, :, :],
                    camera=camera,
                    camera_angle=self.camera_angles[camera_idx],
                )
                for view_pos, (camera_idx, camera) in enumerate(
                    zip(loaded_cam_indices, all_cameras)
                )
            ]

            yield InputFrame(views=views), gt_tracking



