"""
An interface for collecting demos with keyboard.
"""

import time
import sys
import tty
import termios
import numpy as np
import pickle
import os
import select
import argparse
import signal
from panda_oyhand_env import PandaOYhandEnv
from pcd_process import robot2cam_params
from franka_home_real import ARM_HOME, HAND_HOME
from pathlib import Path
from traj2Points import Traj2Points, traj_offset_params
from termcolor import cprint
from save_file import save_pcd, save_img, save_ig_pcd
from pcd_process import pcd_crop
from vis_pcd import catpcd
from num_print import p
import zarr
def multiply_list_elements(input_list, multiplier):
    """Return a new list with every element of *input_list* scaled by *multiplier*."""
    scaled = []
    for value in input_list:
        scaled.append(value * multiplier)
    return scaled

def auto_interpolate(states_array, xyz_diff_limit=None, rpy_diff_limit=None, known_exceed_step=None):
    """Densify a state trajectory so consecutive states stay within per-step limits.

    Each state is a sequence whose first three components are xyz position and
    whose components [3:6] are treated as rpy angles (any further components,
    e.g. a hand channel, are interpolated linearly along with the rest).

    Args:
        states_array: sequence of at least 2 states.
        xyz_diff_limit: max allowed Euclidean xyz distance between consecutive
            output states. If None, derived from ``known_exceed_step``.
        rpy_diff_limit: max allowed wrapped rpy-difference norm between
            consecutive output states. If None, derived from ``known_exceed_step``.
        known_exceed_step: index of a step whose incoming transition is known to
            exceed the limits; the transition just BEFORE it
            (states_array[known_exceed_step-2] -> states_array[known_exceed_step-1])
            is used as the reference limit, so it must be >= 2.

    Returns:
        List of states with linearly interpolated states inserted wherever a
        transition exceeds either limit. The first element is states_array[0]
        unchanged; inserted/subsequent elements are plain lists.
    """
    assert len(states_array) >= 2, "States array must have at least 2 elements"

    def _required_steps(diff_norm, limit):
        # Number of substeps so each substep stays within `limit`.
        # diff_norm <= limit covers diff_norm == limit == 0, where the original
        # code computed 0/0 -> nan and int(np.ceil(nan)) raised ValueError.
        if diff_norm <= limit:
            return 1
        assert limit > 0, "cannot interpolate: reference step limit is zero while states differ"
        return int(np.ceil(diff_norm / limit))

    def _wrapped_rpy_norm(rpy_a, rpy_b):
        # Per-component angular distance wrapped to the shorter arc around 2*pi.
        rpy_diff = np.abs(np.asarray(rpy_b) - np.asarray(rpy_a))
        rpy_diff = np.minimum(rpy_diff, 2 * np.pi - rpy_diff)
        return np.linalg.norm(rpy_diff)

    if known_exceed_step is not None:
        # Lower bound is 2 (not 1): with known_exceed_step == 1 the reference
        # index (known_exceed_step - 2) == -1 silently wrapped around to the
        # LAST state and produced meaningless limits.
        assert 2 <= known_exceed_step < len(states_array), \
            "known_exceed_step must be between 2 and len(states_array)-1"
        prev_state = np.array(states_array[known_exceed_step - 2])
        curr_state = np.array(states_array[known_exceed_step - 1])
        if xyz_diff_limit is None:
            xyz_diff_limit = np.linalg.norm(prev_state[:3] - curr_state[:3])
        if rpy_diff_limit is None:
            rpy_diff_limit = _wrapped_rpy_norm(prev_state[3:6], curr_state[3:6])
    assert xyz_diff_limit is not None, "xyz_diff_limit must be provided or known_exceed_step must be specified"
    assert rpy_diff_limit is not None, "rpy_diff_limit must be provided or known_exceed_step must be specified"

    result_states = [states_array[0]]
    for i in range(len(states_array) - 1):
        current_state = np.array(states_array[i])
        next_state = np.array(states_array[i + 1])

        xyz_diff_norm = np.linalg.norm(next_state[:3] - current_state[:3])
        rpy_diff_norm = _wrapped_rpy_norm(current_state[3:6], next_state[3:6])

        required_steps = max(_required_steps(xyz_diff_norm, xyz_diff_limit),
                             _required_steps(rpy_diff_norm, rpy_diff_limit))

        if required_steps > 1:
            # NOTE(review): interpolation is plain linear on the raw values, so
            # rpy components are NOT interpolated along the shortest arc even
            # though the distance check above wraps them — confirm intended
            # for trajectories crossing the +/-pi boundary.
            for j in range(1, required_steps):
                t = j / required_steps
                interpolated_state = current_state + t * (next_state - current_state)
                result_states.append(interpolated_state.tolist())

        result_states.append(next_state.tolist())

    return result_states

class CollectDemo:
    """Keyboard-driven teleoperation UI for collecting robot demonstrations.

    Wraps a PandaOYhandEnv, maps single key presses to delta arm/hand actions
    or calibration offsets, can replay joint trajectories loaded from a zarr
    dataset, and pickles the recorded observation/action streams to disk
    (including on Ctrl+C, via a SIGINT handler).
    """
    def __init__(self, save_args):
        # save_args: argparse namespace providing exp_name and traj_name.
        self.save_args = save_args
        # Hard-coded zarr dataset used as the replay-trajectory source
        # (consumed by the 'e' and '=' keys in main()).
        self.zarr_path='/home/dwl/DemonGen_dev/zarrtarj/grab_roller-demo_clean_p86-100-fgr2-ee-ta1-cam1-100-ee.zarr-trajlocal.zarr'
        self.env = PandaOYhandEnv(camera="D435")
        self.env.go_home()
        time.sleep(0.1)  # brief settle after homing before reset
        self.env.reset()
        # Output path: data/source_demos/<exp_name>/<traj_name>.pkl
        self.save_base = "data/source_demos"
        self.save_dir = os.path.join(self.save_base, save_args.exp_name)
        os.makedirs(self.save_dir, exist_ok=True)
        self.save_path = os.path.join(self.save_dir, f"{save_args.traj_name}.pkl")
        self.to_use_teleop = True
        self.to_use_arm = True
        # Per-timestep recording buffers; stacked and pickled in save_state().
        self.point_cloud_list = []
        self.image_list = []
        self.depth_list = []
        self.joint_state_list = []
        self.ee_state_list = []
        self.action_list = []
        # ARM_HOME appears to be a 6-component pose (basicaction applies
        # 6-element deltas to it — TODO confirm xyz+rpy layout); HAND_HOME is a
        # scalar wrapped into a single-element list, so a full action is
        # arm (6) + hand (1) as a flat list.
        self.default_arm_action = ARM_HOME.tolist()
        self.default_hand_action = [HAND_HOME]
        # Reference action for the delta-action logging in log_exec_step().
        self.last_action = self.default_arm_action + self.default_hand_action
        self.arm_action = ARM_HOME.tolist()
        self.hand_action = [HAND_HOME]
        # Calibration object currently receiving offset keys; toggled by '9'/'0'.
        self.bind_cal_obj = robot2cam_params
        # Multiplier for calibration offset step sizes; set by digit keys 1-8.
        self.delta_scale = 1
        self.zarr_array = zarr.open(self.zarr_path, mode='r')
        # Cumulative episode end indices into the flat per-timestep state array.
        self.episode_ends = self.zarr_array['meta']['episode_ends'][:].tolist()
        # First 8 components of the recorded qpos; index 7 is treated as the
        # hand channel below — TODO confirm layout (presumably 7 arm joints + 1 hand).
        self.joint_states8D = self.zarr_array['qpos']['state'][:,:8]
        # Copy with the hand channel forced to 1 for the 'e' replay path
        # (presumably "no grasp"/open hand — verify against the hand driver).
        self.joint_states8D_ng=self.joint_states8D.copy()
        for state in self.joint_states8D_ng:
            state[7]=1
        # Make Ctrl+C save collected data before exiting.
        signal.signal(signal.SIGINT, self.signal_handler)
    
    def signal_handler(self, signum, frame):
        """SIGINT handler: persist whatever was collected so far, then exit."""
        cprint("Ctrl+C detected, saving state...", "yellow")
        self.save_state()
        sys.exit(0)
    
    def get_episode_timesteps(self, episode_idx):
        """Return (start_idx, end_idx) of an episode in the flat state array.

        episode_ends holds cumulative end indices, so episode i spans
        [episode_ends[i-1], episode_ends[i]) with episode 0 starting at 0.
        Accepts a non-negative int or a numeric string.
        """
        if isinstance(episode_idx, str):
            assert episode_idx.isdigit(), f"episode_idx: {episode_idx} is not a number"
            episode_idx = int(episode_idx)
        assert episode_idx >= 0, f"episode_idx: {episode_idx}"
        start_idx = 0 if episode_idx == 0 else self.episode_ends[episode_idx - 1]
        end_idx = self.episode_ends[episode_idx]
        return start_idx, end_idx
    def get_zarr_states(self, episode_idx):
        """Return the 8-D joint-state rows of one episode from the zarr data."""
        start_idx, end_idx = self.get_episode_timesteps(episode_idx)
        print(f"start_idx: {start_idx}, end_idx: {end_idx} for episode_idx: {episode_idx}")
        return self.joint_states8D[start_idx:end_idx]
    def get_qpos(self,episode_idx):
        """Alias for get_zarr_states(); returns an episode's qpos rows."""
        zarr_states = self.get_zarr_states(episode_idx)
        return zarr_states
    
    def getch(self):
        """Read one key command from stdin in raw (unbuffered) mode.

        After the first character, waits up to 0.5 s for a second one so a
        command may come back as a 1- or 2-character string; terminal settings
        are always restored afterwards.
        """
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            first_char = sys.stdin.read(1)
            timeout = 0.5
            ready, _, _ = select.select([sys.stdin], [], [], timeout)
            if ready:
                second_char = sys.stdin.read(1)
                return first_char + second_char
            else:
                return first_char
        finally:
            # Restore the terminal even if reading raised.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    
    def save_state(self):
        """Pickle the recorded buffers and persist calibration parameters.

        Stacks each per-timestep buffer into an array, writes the dict
        {'point_cloud', 'image', 'depth', 'ee_state', 'delta_action',
        'joint_state'} to self.save_path (skipped with a warning when nothing
        was recorded), then saves both calibration objects.

        Returns:
            The pickle save path (even when no demo data was written).
        """
        if len(self.point_cloud_list) > 0:
            point_cloud_arrays = np.stack(self.point_cloud_list, axis=0)
            image_arrays = np.stack(self.image_list, axis=0)
            depth_arrays = np.stack(self.depth_list, axis=0)
            ee_state_arrays = np.stack(self.ee_state_list, axis=0)
            joint_state_arrays = np.stack(self.joint_state_list, axis=0)
            action_arrays = np.stack(self.action_list, axis=0)
            data = {'point_cloud': point_cloud_arrays, 'image': image_arrays, 'depth': depth_arrays, 'ee_state': ee_state_arrays, 'delta_action': action_arrays, 'joint_state': joint_state_arrays}
            with open(self.save_path, 'wb') as f:
                pickle.dump(data, f)
            print("save data to: ", self.save_path)
        else:
            cprint("No demo data to save", "yellow")
        cprint("Saving calibration parameters...", "green")
        cprint("Saving robot2cam calibration parameters...", "green")
        robot2cam_params.save_calibration()
        robot2cam_params.get_calibration_summary()
        cprint("Saving traj offset calibration parameters...", "green")
        traj_offset_params.save_calibration()
        traj_offset_params.get_calibration_summary()
        return self.save_path
        

    def save_debug_info(self):
        """Capture a cropped point cloud + RGB frame and dump the RGB for debugging."""
        # if len(self.point_cloud_list) >=2:
        #     return
        point_cloud, rgbd_frame = self.env.get_point_cloud_with_image()
        point_cloud = pcd_crop(point_cloud)
        # combined_pcd = catpcd(point_cloud, self.zarr_traj_pcd)
        # combined_pcd = point_cloud
        rgb = rgbd_frame[:, :, :3]
        depth = rgbd_frame[:, :, -1]
        # save_pcd("debug_pointcloud.pcd", point_cloud)
        # save_ig_pcd(point_cloud, title="Debug Point Cloud", figsize=(12, 10))
        print("step ",len(self.point_cloud_list))
        save_img("debug_rgb.png", rgb,surpress_print=False)
        # save_img("debug_depth.png", depth,surpress_print=True)
    def basicaction(self, arm_command):
        """Map a teleop key to a 6-D delta and accumulate it into self.arm_action.

        Translation keys: w/s -> +/- component 0, a/d -> +/- component 1,
        g/t -> +/- component 2 (0.03 per press). Rotation keys: z/x -> -/+
        component 5, c/v -> -/+ component 4, b/n -> -/+ component 3 (0.15 per
        press). Space is an explicit no-op.

        NOTE(review): the if-chain overwrites delta_arm_action rather than
        summing, so when a 2-char command matches several keys only the LAST
        matching key takes effect — confirm intended.

        Returns:
            The 6-element delta that was (possibly) applied.
        """
        delta_range = 0.03
        delta_angle = 0.15
        delta_arm_action = [0, 0, 0, 0, 0, 0]
        if 'w' in arm_command:
            delta_arm_action = [delta_range, 0, 0, 0, 0, 0]
        if 's' in arm_command:
            delta_arm_action = [-delta_range, 0, 0, 0, 0, 0]
        if 'a' in arm_command:
            delta_arm_action = [0, delta_range, 0, 0, 0, 0]
        if 'd' in arm_command:
            delta_arm_action = [0, -delta_range, 0, 0, 0, 0]
        if 't' in arm_command:
            delta_arm_action = [0, 0, -delta_range, 0, 0, 0]
        if 'g' in arm_command:
            delta_arm_action = [0, 0, delta_range, 0, 0, 0]
        if ' ' in arm_command:
            delta_arm_action = [0, 0, 0, 0, 0, 0]
        if 'z' in arm_command:
            delta_arm_action = [0, 0, 0, 0, 0, -delta_angle]
        if 'x' in arm_command:
            delta_arm_action = [0, 0, 0, 0, 0, delta_angle]
        if 'c' in arm_command:
            delta_arm_action = [0, 0, 0, 0, -delta_angle, 0]
        if 'v' in arm_command:
            delta_arm_action = [0, 0, 0, 0, delta_angle, 0]
        if 'b' in arm_command:
            delta_arm_action = [0, 0, 0, -delta_angle, 0, 0]
        if 'n' in arm_command:
            delta_arm_action = [0, 0, 0, delta_angle, 0, 0]
        # Only accumulate when a recognized key was pressed, so unrelated
        # commands leave arm_action untouched.
        if any(c in arm_command for c in ['w', 's', 'a', 'd', 't', 'g', ' ', 'z', 'x', 'c', 'v', 'b', 'n']):
            self.arm_action = [i+j for i,j in zip(self.arm_action, delta_arm_action)]
        return delta_arm_action
    def offset_cal(self, arm_command, calib_object):
        """Apply a calibration-offset key to the given calibration object.

        Digit keys 1-8 set self.delta_scale instead of applying an offset.
        rpy offset keys (0.2 * delta_scale per press): r/f -> +/- component 0,
        p/; -> +/- component 1, y/h -> +/- component 2. Position offset keys
        (0.01 * delta_scale per press): u/j -> +/- x, i/k -> +/- y, o/l -> +/- z.
        """
        if arm_command.isdigit() and int(arm_command)<9 and int(arm_command)>0:
            self.delta_scale=int(arm_command)
            cprint(f"Delta scale set to {self.delta_scale}", "green")
            return
        delta_rpy = 0.2 * self.delta_scale
        if 'r' in arm_command:
            calib_object.add_offset_rpy([delta_rpy, 0, 0])
        if 'f' in arm_command:
            calib_object.add_offset_rpy([-delta_rpy, 0, 0])
        if 'p' in arm_command:
            calib_object.add_offset_rpy([0, delta_rpy, 0])
        if ';' in arm_command:
            calib_object.add_offset_rpy([0, -delta_rpy, 0])
        if 'y' in arm_command:
            calib_object.add_offset_rpy([0, 0, delta_rpy])
        if 'h' in arm_command:
            calib_object.add_offset_rpy([0, 0, -delta_rpy])
        delta_pos = 0.01 * self.delta_scale
        if 'u' in arm_command:
            calib_object.add_offset_pos([delta_pos, 0, 0])
        if 'j' in arm_command:
            calib_object.add_offset_pos([-delta_pos, 0, 0])
        if 'i' in arm_command:
            calib_object.add_offset_pos([0, delta_pos, 0])
        if 'k' in arm_command:
            calib_object.add_offset_pos([0, -delta_pos, 0])
        if 'o' in arm_command:
            calib_object.add_offset_pos([0, 0, delta_pos])
        if 'l' in arm_command:
            calib_object.add_offset_pos([0, 0, -delta_pos])
    def log_exec_step(self, action):
        """Execute `action` on the env (qpos drive) and record the observation.

        Appends point cloud, image, depth, end-effector state and joint state
        to the buffers, plus the element-wise delta between `action` and the
        previously executed action; then updates last_action.
        """
        obs_dict, _, _, _ = self.env.step(np.array(action),qpos_drive=True)
        point_cloud = obs_dict['point_cloud']
        image = obs_dict['image']
        depth = obs_dict['depth']
        ee_state = obs_dict['agent_pos']
        self.point_cloud_list.append(point_cloud)
        self.ee_state_list.append(ee_state)
        self.joint_state_list.append(self.env.get_joint_pos())
        self.image_list.append(image)
        self.depth_list.append(depth)
        delta_action = [i-j for i,j in zip(action, self.last_action)]
        self.action_list.append(delta_action)
        self.last_action = action
    
    def main(self):
        """Interactive key loop.

        Special keys: '`' quits without saving; 'q' saves and quits;
        '9'/'0' select the robot2cam / traj-offset calibration target;
        '['/']' set the hand action to 0.08 / 0.0 (presumably open/close);
        'e' replays the hand-overridden zarr trajectory via set_qpos_hand
        (debug dumps only, nothing recorded); '=' replays the raw zarr
        trajectory through log_exec_step (recorded). Any other key is fed to
        basicaction()/offset_cal(), then the resulting absolute arm+hand
        action is executed and logged.
        """
        while True:
            cprint(f"Press 9 for robot2cam calibration, 0 for traj offset calibration Current: {'robot2cam' if self.bind_cal_obj == robot2cam_params else 'traj_offset'}", "magenta")
            arm_command = self.getch().lower()
            if arm_command == '`':
                break
            if arm_command == 'q':
                self.save_state()
                break
            cprint(f"your arm_command: {arm_command}", "green")
            if arm_command == '9':
                self.bind_cal_obj = robot2cam_params
                cprint("Switched to robot2cam calibration", "green")
                continue
            if arm_command == '0':
                self.bind_cal_obj = traj_offset_params
                cprint("Switched to traj offset calibration", "green")
                continue
            # A single key may update both the teleop action and the currently
            # bound calibration object.
            self.basicaction(arm_command)
            self.offset_cal(arm_command, self.bind_cal_obj)
        
            if '[' in arm_command:
                self.hand_action = [0.08]
            if ']' in arm_command:
                self.hand_action = [0.0]
            if 'e' in arm_command:
                for i, step in enumerate(auto_interpolate(self.joint_states8D_ng,known_exceed_step=5)):
                    self.env.set_qpos_hand(np.array(step))
                    self.save_debug_info()
                    # time.sleep(0.5)
                continue
            if '=' in arm_command:
                for i, step in enumerate(auto_interpolate(self.joint_states8D,known_exceed_step=5)):
                    self.log_exec_step(step)
                    self.save_debug_info()
                continue
            action = self.arm_action + self.hand_action
            cprint(f"your action: {action}", "cyan")
            self.log_exec_step(action)
            self.save_debug_info()
if __name__ == "__main__":
    args = argparse.ArgumentParser()
    args.add_argument("exp_name", type=str)
    args.add_argument("traj_name", type=str)
    args = args.parse_args()
    demo_collector = CollectDemo(args)
    demo_collector.main()