# python3.6

import copy
import random
import rtde_control
import rtde_receive
from paho.mqtt import client as mqtt_client
import numpy as np
import sys
import time
import threading
import datetime
import os
import pickle as pkl
import h5py
# from load_classifier_one_image import Classifier

from tqdm import tqdm
import cv2
import gym_pih

#from serl_launcher.wrappers.chunking import ChunkingWrapper
import gymnasium as gym
import traceback

# Robot connection settings
ip = "192.168.3.140"     # real robot IP
# ip="192.168.237.129"     # URSim (simulator) IP

# MQTT client parameters
broker = 'localhost'
port = 1883
topic = "ee_pose"
# generate client ID with pub prefix randomly
client_id = f'python-mqtt-{random.randint(0, 100)}'

# Robot motion parameters
velocity = 0.5
acceleration = 0.5
dt = 1.0/125  # 8 ms control period (125 Hz)
lookahead_time = 0.1
gain = 300

ratio_translation=0.2    # scale factor: robot end-effector translation vs. Touch stylus translation
ratio_rotation=0.2       # scale factor: robot end-effector rotation vs. Touch stylus rotation

# Servo counter and status-output frequency
servo_count = 0
update_frequency = 100  # emit a status line once every `update_frequency` servo steps
start_time = 0
# NOTE(review): the zero initializer above is dead — immediately overwritten below.
start_time = time.time()
distance=np.zeros(6)
button_state=0.0
pose=np.zeros(6)

# Trajectory sampling parameters
count=0                  # steps taken in the current episode
transitions = []         # (observation, action) pairs for the current episode
success_count = 0        # successful episodes recorded so far
success_needed = 20      # stop after this many successful demonstrations
total_count = 0

difficulty = 0.5         # task difficulty passed to the gym environment
max_timesteps = 400      # nominal episode length (also encoded in the output dir name)

# Timestamp used to make the output directory name unique per run.
uuid = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# file_name = f"peg_insert_1_difficulty_{success_needed}_demos_{uuid}.pkl"
# file_name = f"peg_insert_fail_{success_needed}_demos_{uuid}.pkl"
file_dir = os.path.dirname(os.path.realpath(__file__))  # same dir as this script
file_dir = os.path.join(file_dir,f"_difficulty_{difficulty}_episode_len_{max_timesteps}_"+uuid)
# file_dir = r"E:\study\data\serl\demostrations\sparse_reward"
# file_dir = r"E:\study\data\serl\demostrations\3"
# file_path = os.path.join(file_dir, file_name)

# Create the episode output directory (single level; parent is the script dir).
if not os.path.exists(file_dir):
    os.mkdir(file_dir)
# if os.path.exists(file_path):
#     raise FileExistsError(f"{file_name} already exists in {file_dir}")
# if not os.access(file_dir, os.W_OK):
#     raise PermissionError(f"No permission to write to {file_dir}")

# cv2.namedWindow("Wrist Image", cv2.WINDOW_NORMAL)
# cv2.resizeWindow("Wrist Image", 320, 320)
# cv2.namedWindow("Side Image", cv2.WINDOW_NORMAL)
# cv2.resizeWindow("Side Image", 320, 320)

def quaternion_to_rotation_vector(quaternion):
    """Convert a unit quaternion (w, x, y, z) to a rotation vector (axis * angle).

    Returns a length-3 ndarray; the zero vector for (near-)identity rotations.
    """
    q0, q1, q2, q3 = quaternion
    # Clamp the scalar part into arccos's domain: floating-point drift can push
    # |q0| infinitesimally above 1, which would make arccos return NaN.
    angle = 2 * np.arccos(np.clip(q0, -1.0, 1.0))
    half_sin = np.sin(angle / 2)
    # angle ~ 0 (q0 ~ +1) or angle ~ 2*pi (q0 ~ -1): the rotation is (near)
    # identity and the axis is ill-defined — return the zero vector instead of
    # dividing by a vanishing sine.
    if angle < 1e-6 or abs(half_sin) < 1e-9:
        return np.zeros(3)
    axis = np.array([q1, q2, q3]) / half_sin
    return angle * axis


def rotation_vector_to_quaternion(rotation_vector):
    """Convert a rotation vector (axis * angle) into a unit quaternion (w, x, y, z)."""
    theta = np.linalg.norm(rotation_vector)
    # Near-zero rotation: the axis is undefined, so return the identity quaternion.
    if theta < 1e-6:
        return np.array([1.0, 0.0, 0.0, 0.0])
    unit_axis = rotation_vector / theta
    half = theta / 2
    # Scalar part first, then the vector part scaled by sin(theta/2).
    return np.concatenate(([np.cos(half)], np.sin(half) * unit_axis))


def quaternion_multiply(q1, q2):
    """Return the Hamilton product q1 * q2 of two quaternions given as (w, x, y, z)."""
    a, b, c, d = q1
    e, f, g, h = q2
    # Standard Hamilton product, component by component.
    return np.array([
        a * e - b * f - c * g - d * h,
        a * f + b * e + c * h - d * g,
        a * g - b * h + c * e + d * f,
        a * h + b * g - c * f + d * e,
    ])

def connect_mqtt() -> mqtt_client.Client:
    """Create an MQTT client, register the connect callback, and connect to the broker.

    Returns the connected paho-mqtt client; the network loop is NOT started here
    (the caller runs ``loop_forever``).
    """
    def on_connect(client, userdata, flags, rc):
        # rc == 0 means the broker accepted the connection.
        if rc == 0:
            print("Connected to MQTT Broker!")
        else:
            # Bug fix: the original passed printf-style args to print(), which
            # never interpolates — use an f-string so the code is shown.
            print(f"Failed to connect, return code {rc}")

    client = mqtt_client.Client(client_id)
    client.on_connect = on_connect
    client.connect(broker, port)
    return client

# def subscribe(client: mqtt_client):
#     def on_message(client, userdata, msg):
#         print(f"Received `{msg.payload.decode()}` from `{msg.topic}` topic")

#     client.subscribe(topic)
#     client.on_message = on_message

def subscribe(client: mqtt_client.Client,env):
    """Register the MQTT message handler that tele-operates `env` and records demos.

    Each message on `topic` is expected to be seven space-separated floats:
    a 6-DoF delta pose (x y z rx ry rz) followed by a button state. While the
    button is pressed the env is stepped with the clipped xyz delta; completed
    (terminated) episodes are padded to the episode length and written out as
    one HDF5 file each under `file_dir`. Disconnects the client once
    `success_needed` successful episodes have been saved.
    """
    def on_message(client, userdata, msg):
        
        global success_count
        global transitions
        global obs
        global pbar
        global count
        global total_count

        # Payload format: "x y z rx ry rz button" as space-separated floats.
        message = np.array(
            list(map(float, msg.payload.decode().split())), dtype=float)
        delta_pose = message[0:6]
        button_state = message[6]
        
        # Only step the environment while the Touch stylus button is held.
        if(button_state):
            # delta_pose[0:3] = delta_pose[0:3] * 0.001*ratio_translation        # Touch stylus displacement is in mm; *0.001 converts to meters
            # Only the translational part is used as the action; rotation is ignored.
            actions=np.array([delta_pose[0],delta_pose[1],delta_pose[2]])
            actions=np.clip(actions,[-1,-1,-1],[1,1,1])
            next_obs, rew, terminated, truncated, info = env.step(actions)
            # rew=classifer.predict(next_obs)
            # rew=1 if rew>=0.5 else 0
            count+=1
            # cv2.imshow("Wrist Image", next_obs["wrist"])
            # cv2.imshow("Side Image", next_obs["side"])
            # cv2.waitKey(1)
            # Rewrite the status line in place instead of scrolling the terminal.
            sys.stdout.write("\r"+ " " * 100 + "\r")
            sys.stdout.write(f"count:{count},rewards:{rew}")
            # sys.stdout.write(f",success_rate:{info['success_rate']},curriculum_difficulty:{info['curriculum_difficulty']}")
            sys.stdout.flush()

            # Pair the observation *before* the step with the action that was taken.
            transition = {
                "observations": obs,
                "actions": actions
            }

            transitions.append(transition)

            obs = next_obs

            if terminated :
                # Episode succeeded: pad it to fixed length, repack, and persist.
                success_count += 1
                print(len(transitions))
                pbar.update(1)
                obs, _ = env.reset()
                # cv2.imshow("Wrist Image", obs["wrist"])
                # cv2.imshow("Side Image", obs["side"])
                # Local; shadows the module-level `max_timesteps` on purpose —
                # the env's registered episode length is authoritative here.
                max_timesteps = env.spec.max_episode_steps
                # Pad to a fixed length by repeating the final transition.
                while len(transitions)<max_timesteps:
                    transitions.append(transition)

                data_dict = {
                    '/observations/displacement': [],
                    '/observations/force': [],
                    '/observations/slope': [],
                    '/action': [],
                    '/observations/images/wrist':[],
                    '/observations/images/side':[]
                }

                # len(joint_traj) i.e. actions: max_timesteps
                # len(episode_replay) i.e. time steps: max_timesteps + 1
                # Drain `transitions` into flat per-key lists matching the HDF5 layout.
                # NOTE(review): assumes each observation dict has 'state'
                # {displacement, force, slope} and 'images' {wrist, side} keys —
                # defined by the gym_pih env; verify against that package.
                while transitions:
                    transition = transitions.pop(0)
                    data_dict['/observations/displacement'].append(transition['observations']['state']['displacement'])
                    data_dict['/observations/force'].append(transition['observations']['state']['force'])
                    data_dict['/observations/slope'].append(transition['observations']['state']['slope'])
                    data_dict['/action'].append(transition['actions'])
                    data_dict['/observations/images/wrist'].append(transition['observations']['images']['wrist'])
                    data_dict['/observations/images/side'].append(transition['observations']['images']['side'])

                # HDF5
                t0 = time.time()
                dataset_path = os.path.join(file_dir, f'episode_{success_count}')
                with h5py.File(dataset_path + '.hdf5', 'w', rdcc_nbytes=1024 ** 2 * 2) as root:
                    root.attrs['sim'] = True
                    observation = root.create_group('observations')
                    image = observation.create_group('images')
                    
                    # NOTE(review): datasets are sized to exactly `max_timesteps`;
                    # an episode longer than that would make the bulk assignment
                    # below fail — confirm the env truncates at this length.
                    _ = image.create_dataset('wrist', (max_timesteps, 128, 128, 3), dtype='uint8',
                                                 chunks=(1, 128, 128, 3), )
                    _ = image.create_dataset('side', (max_timesteps, 128, 128, 3), dtype='uint8',
                                                    chunks=(1, 128, 128, 3), )
                    # compression='gzip',compression_opts=2,)
                    # compression=32001, compression_opts=(0, 0, 0, 0, 9, 1, 1), shuffle=False)
                    displacement = observation.create_dataset('displacement', (max_timesteps, 3))
                    force = observation.create_dataset('force', (max_timesteps, 3))
                    slope = observation.create_dataset('slope', (max_timesteps, 1))
                    action = root.create_dataset('action', (max_timesteps, 3))

                    # Bulk-write each list into its pre-sized dataset.
                    for name, array in data_dict.items():
                        root[name][...] = array

                transitions = []
                count = 0
                
            
            if truncated :
                # Episode timed out / failed: discard it and start over.
                obs,_ = env.reset()
                transitions = []
                count = 0

            if success_count >= success_needed:
                # Enough demos collected: stop the network loop (loop_forever returns).
                pbar.close()
                client.disconnect()

            
                
        
        
 
    client.subscribe(topic)
    client.on_message = on_message


def run():
    """Entry point: connect to MQTT, create the env, and collect demos until done.

    Blocks in the MQTT network loop; all episode stepping and recording happens
    inside the ``subscribe`` message callback. Always cleans up and exits.
    """
    global success_count
    global transitions
    global obs
    global pbar
    global difficulty

    # Pre-bind so the finally-block can safely test them: in the original code,
    # a failure before these were assigned raised NameError during cleanup.
    env = None
    pbar = None
    try:
        pbar = tqdm(total=success_needed)
        client = connect_mqtt()
        env = gym.make("act", curriculum = False,reward_shape="sparse",render = True,save_video=False,difficulty=difficulty)
        obs,_=env.reset()
        # cv2.imshow("Wrist Image", obs["wrist"])
        # cv2.imshow("Side Image", obs["side"])
        # cv2.waitKey(1)
        subscribe(client,env)
        # Returns only after client.disconnect() is called from the callback.
        client.loop_forever()

    except KeyboardInterrupt:
        print("KeyboardInterrupt")
    except Exception as e:
        print("Error:", e)
        traceback.print_exc()

    finally:
        # Guarded cleanup: env/pbar may never have been created if setup failed.
        if env is not None:
            env.reset()
            env.close()
        if pbar is not None:
            pbar.close()
        cv2.destroyAllWindows()
        sys.exit(0)

    


# Run the demo-collection loop only when executed as a script (not on import).
if __name__ == '__main__':
    run()

