import av
import numpy as np
from typing import Iterator

from hand.track.umetrack.datasets.video_pose_data import _load_hand_pose_labels, VideoStream
from hand.track.umetrack.hand.handpose import SingleHandPose
from hand.track.umetrack.pipelines.track.tracker import InputFrame, ViewData
from .base import Loader


class UmetrackLoader(Loader):
    """Frame loader for UmeTrack multi-view videos.

    Each decoded frame contains four camera views concatenated horizontally
    (width = 4*W). Iteration yields them split into a (4, H, W, 3) array.
    """

    def __init__(self, data_path: str):
        # Path to the video file; opened lazily in __len__/__iter__.
        self._data_path = data_path

    def __len__(self) -> int:
        # Use a context manager so the container/file handle is closed
        # instead of leaking on every call.
        with av.open(self._data_path) as container:
            # take first video stream
            return container.streams.video[0].frames

    def __iter__(self) -> Iterator[np.ndarray]:
        with av.open(self._data_path) as container:
            # take first video stream
            stream = container.streams.video[0]
            print(f"Opened ({int(stream.average_rate)} fps) video from {self._data_path}")

            for frame in container.decode(stream):
                raw_image = np.array(frame.to_image())  # shape (H, 4*W, 3)
                H, W4, C = raw_image.shape
                W = W4 // 4
                # Reshape (H, 4*W, 3) -> (H, 4, W, 3) then move the view axis
                # first, splitting the wide frame into its four side-by-side
                # camera views: result shape (4, H, W, 3).
                yield raw_image.reshape(H, 4, W, C).transpose(1, 0, 2, 3)


class UmetrackGTLoader(Loader):
    """Loader that pairs UmeTrack video frames with ground-truth hand poses.

    Iteration yields ``(InputFrame, gt_tracking)`` tuples, where
    ``gt_tracking`` maps a hand index (0 or 1) to its ``SingleHandPose``
    for hands with positive confidence on that frame.
    """

    def __init__(self, data_path: str):
        # Labels are stored next to the video with the same stem and a
        # ".json" suffix (assumes a 4-character extension like ".mp4").
        label_path = data_path[:-4] + ".json"
        self._hand_pose_labels = _load_hand_pose_labels(label_path)
        self._image_stream = VideoStream(data_path)
        # Every video frame must have a matching label entry.
        assert len(self._hand_pose_labels) == len(self._image_stream)

    def __len__(self) -> int:
        return len(self._image_stream)

    def __iter__(self):
        # Hoist the loop-invariant camera count out of the per-frame loop.
        num_cameras = len(self._hand_pose_labels.cameras)
        for frame_idx, raw_mono in enumerate(self._image_stream):
            # Collect ground-truth poses for each hand that was tracked
            # (confidence > 0) on this frame.
            gt_tracking = {}
            for hand_idx in range(2):
                if self._hand_pose_labels.hand_confidences[frame_idx, hand_idx] > 0:
                    gt_tracking[hand_idx] = SingleHandPose(
                        joint_angles=self._hand_pose_labels.joint_angles[
                            frame_idx, hand_idx
                        ],
                        wrist_xform=self._hand_pose_labels.wrist_transforms[
                            frame_idx, hand_idx
                        ],
                        hand_confidence=self._hand_pose_labels.hand_confidences[
                            frame_idx, hand_idx
                        ],
                    )

            # Split the flat mono buffer into one row of pixels per camera:
            # (H, num_cameras, W) — TODO confirm exact layout against
            # VideoStream's output shape.
            multi_view_images = raw_mono.reshape(
                raw_mono.shape[0], num_cameras, -1
            )

            # An all-zero camera-to-world transform marks an untracked frame.
            invalid_camera_to_world = (
                self._hand_pose_labels.camera_to_world_transforms[frame_idx].sum() == 0
            )
            if invalid_camera_to_world:
                # Untracked cameras cannot have valid hand labels.
                assert (
                    not gt_tracking
                ), "Cameras are not tracked, expecting no ground truth tracking!"

            # Build one ViewData per camera with its per-frame extrinsics.
            views = []
            for cam_idx in range(num_cameras):
                cur_camera = self._hand_pose_labels.cameras[cam_idx].copy(
                    camera_to_world_xf=self._hand_pose_labels.camera_to_world_transforms[
                        frame_idx, cam_idx
                    ],
                )

                views.append(
                    ViewData(
                        image=multi_view_images[:, cam_idx, :],
                        camera=cur_camera,
                        camera_angle=self._hand_pose_labels.camera_angles[cam_idx],
                    )
                )

            yield InputFrame(views=views), gt_tracking
