import os
import torch
import numpy as np
import cv2

from hand.track.umetrack.camera.camera_utils import _warp_image
from hand.track.umetrack.datasets.video_pose_data import SyncedImagePoseStream
from hand.track.umetrack.pipelines.crop.perspective_crop import gen_crop_cameras
from hand.track.umetrack.pipelines.track.tracker import HandTracker, HandTrackerOpts, MM_TO_M
from torchvision.transforms import Resize
from tqdm import tqdm

SAVE_ROOT = r"/home/duyaoda/UmeTrack/UmeTrack/UmeTrack_data/torch_data"
RESIZE_SIZE = 96
NUM_VIEWS = 4


def process_synced_stream(stream: SyncedImagePoseStream, save_root: str, pre_name: str):
    """Convert one synced image/pose stream into per-hand torch samples on disk.

    For every frame and every tracked hand, perspective crops are generated for
    each camera view; the crops, camera intrinsics/extrinsics, per-view validity
    mask and ground-truth pose are packed into tensors and saved as one ``.pt``
    dict per (frame, hand) under ``<save_root>/samples``.

    Args:
        stream: synchronized image + pose stream to iterate over.
        save_root: output root; a ``samples`` subdirectory is created inside it.
        pre_name: filename prefix identifying the source sequence.
    """
    samples_dir = os.path.join(save_root, "samples")
    os.makedirs(samples_dir, exist_ok=True)

    for frame_idx, (input_frame, gt_tracking) in enumerate(tqdm(stream)):
        gt_hand_model = stream._hand_pose_labels.hand_model
        V = len(input_frame.views)

        # Extrinsics depend only on the frame's views, not on the hand:
        # compute them once per frame instead of once per hand.
        extrinsics_xf = []
        for view in input_frame.views:
            # np.linalg.inv returns a fresh array, so in-place scaling is safe.
            world_to_eye = np.linalg.inv(view.camera.camera_to_world_xf)
            world_to_eye[:3, 3] *= MM_TO_M  # translation: millimeters -> meters
            extrinsics_xf.append(world_to_eye.astype(np.float32))
        extrinsics_tensor = torch.stack(
            [torch.from_numpy(xf) for xf in extrinsics_xf]
        )  # (V, 4, 4)

        crop_cameras = gen_crop_cameras(
            [view.camera for view in input_frame.views],
            stream._hand_pose_labels.camera_angles,
            gt_hand_model,
            gt_tracking,
            min_num_crops=1
        )

        for hand_idx, crop_camera_info in crop_cameras.items():
            # Per-view buffers; views without a crop stay zero-filled with
            # identity intrinsics and a mask of 0.
            images_filled = [
                np.zeros((RESIZE_SIZE, RESIZE_SIZE), dtype=np.float32)
                for _ in range(V)
            ]
            intrinsics_filled = [np.eye(3, dtype=np.float32) for _ in range(V)]
            view_masks = [0] * V  # binary mask per view (0 = padded, 1 = valid)

            # Warp each available view into its crop camera.
            for cam_idx, crop_camera in crop_camera_info.items():
                view_data = input_frame.views[cam_idx]
                crop_image = _warp_image(view_data.camera, crop_camera, view_data.image)

                # Normalize to [0, 1] and store as a single channel.
                images_filled[cam_idx] = crop_image.astype(np.float32) / 255.0
                intrinsics_filled[cam_idx] = crop_camera.uv_to_window_matrix().astype(np.float32)
                view_masks[cam_idx] = 1

            # Ground-truth pose for this hand.
            joint_angles = gt_tracking[hand_idx].joint_angles.astype(np.float32)
            wrist_xform = gt_tracking[hand_idx].wrist_xform.astype(np.float32)

            sample = {
                'images': torch.stack([
                    torch.from_numpy(img).unsqueeze(0) for img in images_filled
                ]),  # (V, 1, RESIZE_SIZE, RESIZE_SIZE)
                'intrinsics': torch.stack([
                    torch.from_numpy(K) for K in intrinsics_filled
                ]),  # (V, 3, 3)
                'extrinsics': extrinsics_tensor,  # (V, 4, 4)
                'view_mask': torch.tensor(view_masks, dtype=torch.uint8),  # (V,)
                'joints': torch.from_numpy(joint_angles),   # (J,)
                'wrist': torch.from_numpy(wrist_xform),     # (4, 4)
                'hand_idx': torch.tensor(hand_idx, dtype=torch.int64),
            }
            sample_filename = os.path.join(
                samples_dir,
                f"{pre_name}_{frame_idx:06d}_hand{hand_idx}.pt"
            )
            torch.save(sample, sample_filename)

if __name__ == '__main__':
    data_root = r"/home/duyaoda/UmeTrack/UmeTrack/UmeTrack_data/raw_data"

    def convert_split(split_path: str, save_dir: str, t: str, n: str, split_tag: str) -> None:
        """Process every user's recordings under one train/test split directory.

        Skips ``.json`` pose-label files (they are consumed by the stream
        loader itself) and converts each remaining recording via
        ``process_synced_stream``.
        """
        for u in os.listdir(split_path):
            print("当前用户:", u)  # progress: user currently being processed
            u_root = os.path.join(split_path, u)
            for i in os.listdir(u_root):
                i_path = os.path.join(u_root, i)
                if i_path.endswith(".json"):
                    continue
                # Prefix encodes dataset / sequence / split / user / recording,
                # with the recording's file extension stripped.
                pre_name = os.path.splitext("-".join([t, n, split_tag, u, i]))[0]
                stream = SyncedImagePoseStream(i_path)
                process_synced_stream(stream, save_dir, pre_name)

    # Output directories do not depend on the loop variables: create once.
    train_save_dir = os.path.join(SAVE_ROOT, "train")
    test_save_dir = os.path.join(SAVE_ROOT, "test")
    os.makedirs(train_save_dir, exist_ok=True)
    os.makedirs(test_save_dir, exist_ok=True)

    for t in os.listdir(data_root):
        if t == ".git":
            continue
        t_root = os.path.join(data_root, t)
        for n in os.listdir(t_root):
            n_root = os.path.join(t_root, n)
            convert_split(os.path.join(n_root, "training"), train_save_dir, t, n, "train")
            convert_split(os.path.join(n_root, "testing"), test_save_dir, t, n, "test")