import zarr
import argparse
import cv2
import os
import numpy as np
import torch
from typing import Union, Dict, Optional
import numcodecs
from termcolor import cprint
from tqdm import tqdm
from utils.cowa_data_util import *
from utils.imagecodecs_numcodecs import register_codecs
# Directory containing this script; used to build the default --record_file path.
CURRENT_PATH = os.path.dirname(__file__)

# Presumably registers the imagecodecs-backed compressors with numcodecs so
# zarr can decode the recorded camera arrays — see utils.imagecodecs_numcodecs.
register_codecs()

def replay(args):
    """Replay episodes from a zarr recording and sanity-check relative-pose actions.

    Loads end-effector position, axis-angle rotation and gripper width from the
    zarr store at ``args.record_file``, then for every step of every episode
    builds observation/action chunks in a relative pose representation. For
    steps deep enough into an episode it additionally reconstructs the action
    trajectory from iteratively-accumulated frame-to-frame relative poses and
    prints the mean absolute error against the directly-computed past-relative
    actions (a consistency check for inference-time pose conversion).

    Args:
        args: argparse ``Namespace`` with a ``record_file`` attribute pointing
            to the zarr dataset.
    """
    cowa_dataset = zarr.open(args.record_file, mode='r')
    episode_ends = cowa_dataset['/meta/episode_ends'][:]
    robot0_eef_pos = cowa_dataset['/data/robot0_eef_pos'][:]
    robot0_eef_rot_axis_angle = cowa_dataset['/data/robot0_eef_rot_axis_angle'][:]
    robot0_gripper_width = cowa_dataset['/data/robot0_gripper_width'][:]
    # BUGFIX: np.concat is a NumPy >= 2.0 alias only; np.concatenate (already
    # used elsewhere in this function) works on every NumPy version.
    # Full state per step: [x, y, z, rx, ry, rz, gripper_width] -> 7 columns.
    robot0_full_state = np.concatenate(
        [robot0_eef_pos, robot0_eef_rot_axis_angle, robot0_gripper_width], axis=-1)

    obs_pose_repr = 'relative'

    # Future action indices 1..16 relative to the current step.
    # (Equivalent to np.linspace(1, 16, 16).astype(np.int32), but direct.)
    action_idx = np.arange(1, 17, dtype=np.int32)

    # Loop over zarr datasets and write episodes to the LeRobot dataset
    # You can modify this for your own data format
    last_episode_end = 0
    # NOTE(review): currently unused in this function; kept as a record of the
    # task the recording corresponds to.
    task_description = "press the button on the top left of the coffee machine"
    for episode in tqdm(range(len(episode_ends)), desc="Processing episodes", unit="episode"):
        episode_states = robot0_full_state[last_episode_end:episode_ends[episode]]

        # test for inference: rolling buffer of the last 16 single-step
        # relative actions (9-dim pose10d minus gripper — TODO confirm layout).
        action_relative = np.zeros((16, 9))

        for step in range(last_episode_end, episode_ends[episode]):
            episode_step = step - last_episode_end

            img = cowa_dataset['/data/camera0_rgb'][step]
            # state = sample_history(episode_states, episode_step, state_chunk_size)
            # action = robot0_full_state[step + 1] if step < len(robot0_full_state) - 1 else robot0_full_state[step]

            # Observation: steps [-3, 0] relative to now; actions: the next 16 steps.
            state_sample = sample_chunk(episode_states, episode_step, idx_relate=[-3, 0], idx_padding=0)
            action_sample = sample_chunk(episode_states, episode_step,
                                        idx_relate=action_idx,
                                        idx_padding=-1)

            pose_mat = pose_to_mat(state_sample[..., 0:6])

            # Express observation poses relative to the current (last) pose.
            obs_pose_mat = convert_pose_mat_rep(
                pose_mat,
                base_pose_mat=pose_mat[-1],
                pose_rep=obs_pose_repr,
                backward=False)
            obs_pose = mat_to_pose10d(obs_pose_mat)
            obs = obs_pose.flatten()

            # Express future action poses relative to the current pose, then
            # re-attach the gripper channel.
            action_mat = pose_to_mat(action_sample[..., 0: 6])
            action_pose_mat = convert_pose_mat_rep(
                action_mat,
                base_pose_mat=pose_mat[-1],
                pose_rep=obs_pose_repr,
                backward=False)
            action_pose = mat_to_pose10d(action_pose_mat)
            action_gripper = action_sample[..., 6: 7]
            action = np.concatenate([action_pose, action_gripper], axis=-1)

            # test for inference
            # During gr00t inference, verify that per-frame relative poses can
            # be converted back into poses relative to the current step.
            action_relative = np.concatenate([action_relative[1:], action[0, :-1][np.newaxis, ...]], axis=0)
            if episode_step > 16:
                # Recompute what the action chunk looked like 15 steps ago...
                state_sample_past = sample_chunk(episode_states, episode_step - 15, idx_relate=[-3, 0], idx_padding=0)
                action_sample_past = sample_chunk(episode_states, episode_step - 15,
                                                    idx_relate=action_idx,
                                                    idx_padding=-1)
                pose_mat_past = pose_to_mat(state_sample_past[..., 0:6])
                action_mat_past = pose_to_mat(action_sample_past[..., 0: 6])
                action_pose_mat_past = convert_pose_mat_rep(
                    action_mat_past,
                    base_pose_mat=pose_mat_past[-1],
                    pose_rep=obs_pose_repr,
                    backward=False)
                action_pose_past = mat_to_pose(action_pose_mat_past)

                # ...and compare against the trajectory rebuilt by chaining the
                # buffered single-step relative actions.
                action_pose10d_0relative = convert_iterative_action10d_to_0relative(action_relative)
                action_pose_mat_0relative = pose10d_to_mat(action_pose10d_0relative)
                action_pose_0relative = mat_to_pose(action_pose_mat_0relative)

                # current_pose = np.eye(4)
                # action_pose_0relative = np.zeros((16, 6))
                # for i in range(action_relative.shape[0]):
                #     action_pose10d = action_relative[i]
                #     action_pose_mat = pose10d_to_mat(action_pose10d)
                #     action_mat = convert_pose_mat_rep(action_pose_mat,
                #                                         base_pose_mat=current_pose,
                #                                         pose_rep='relative',
                #                                         backward=True)
                #     action_pose = mat_to_pose(action_mat)
                #     action_pose_0relative[i] = action_pose
                #     current_pose = action_mat.copy()

                # Mean absolute reconstruction error; should be ~0 if the
                # iterative conversion is consistent.
                print(np.abs(action_pose_0relative - action_pose_past).mean())

            # wait_time feeds the (currently disabled) cv2 display below.
            wait_time = 20
            if step == episode_ends[episode] - 1:
                print(f"====== Episode {episode} ended ======")
                wait_time = 2000
            # cv2.imshow('replay', img)
            # key = cv2.waitKey(wait_time)
            # if key == ord('q'):
            #     break

        last_episode_end = episode_ends[episode]


if __name__ == '__main__':
    # Command-line entry point: resolve the recording path and replay it.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--record_file', '-f',
        type=str,
        default=os.path.join(CURRENT_PATH, "../data/0324button.zarr"),
        help='Path to the record file')

    replay(arg_parser.parse_args())