# python3.6

import copy
import random
import rtde_control
import rtde_receive
from paho.mqtt import client as mqtt_client
import numpy as np
import sys
import time
import threading
import datetime
import os
import pickle as pkl
from load_classifier import Classifier
# from load_classifier_one_image import Classifier

from tqdm import tqdm
from ft300 import RobotiqFt300
from camera import Camera
from writecsv import CSVWriter
import cv2
import csv
from pid import PIDForceController
import gym_pih
from gym_pih.wrapper.record_success_rate import RecordSuccessRateWrapper
from serl_launcher.wrappers.serl_obs_wrappers import SERLObsWrapper
from franka_env.envs.wrappers import BinaryRewardClassifierWrapper
from serl_launcher.networks.reward_classifier import load_classifier_func
import jax

#from serl_launcher.wrappers.chunking import ChunkingWrapper
import gymnasium as gym

# Robot IP
ip = "192.168.3.140"     # real robot IP
# ip="192.168.237.129"     # URSim (simulator) IP

# MQTT client parameters
broker = 'localhost'
port = 1883
topic = "ee_pose"
# generate client ID with pub prefix randomly
client_id = f'python-mqtt-{random.randint(0, 100)}'

# Robot motion parameters (servo control)
velocity = 0.5
acceleration = 0.5
dt = 1.0/125  # control period: 8 ms (125 Hz)
lookahead_time = 0.1
gain = 300

ratio_translation = 0.2    # scale of robot EE translation vs. Touch stylus translation
ratio_rotation = 0.2       # scale of robot EE rotation vs. Touch stylus rotation

# Servo-loop bookkeeping
servo_count = 0
update_frequency = 100  # emit one status line every `update_frequency` servo ticks
start_time = time.time()
distance = np.zeros(6)
button_state = 0.0
pose = np.zeros(6)

# Trajectory-sampling state (shared with the MQTT callback via globals)
count = 0
transitions = []
success_count = 0
success_needed = 20
total_count = 0

# Output file: one timestamped pickle of transitions per collection session
uuid = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
file_name = f"peg_insert_1_difficulty_{success_needed}_demos_{uuid}.pkl"
# file_name = f"peg_insert_fail_{success_needed}_demos_{uuid}.pkl"
# file_dir = os.path.dirname(os.path.realpath(__file__))  # same dir as this script
file_dir = r"E:\study\data\serl\demostrations\sparse_reward"
# file_dir = r"E:\study\data\serl\demostrations\3"
file_path = os.path.join(file_dir, file_name)

# makedirs (not mkdir) so any missing parent directories are created too
os.makedirs(file_dir, exist_ok=True)
if os.path.exists(file_path):
    raise FileExistsError(f"{file_name} already exists in {file_dir}")
if not os.access(file_dir, os.W_OK):
    raise PermissionError(f"No permission to write to {file_dir}")

# Live preview windows for the two camera streams
cv2.namedWindow("Wrist Image", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Wrist Image", 320, 320)
cv2.namedWindow("Side Image", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Side Image", 320, 320)

def quaternion_to_rotation_vector(quaternion):
    """Convert a unit quaternion (w, x, y, z) to an axis-angle rotation vector.

    The returned vector points along the rotation axis and its norm is the
    rotation angle in radians. Near-identity rotations return the zero vector.

    Args:
        quaternion: sequence of four floats (w, x, y, z), assumed unit-norm.

    Returns:
        np.ndarray of shape (3,): rotation vector (axis * angle).
    """
    q0, q1, q2, q3 = quaternion
    # Clamp the scalar part: floating-point drift can push |q0| slightly
    # above 1.0, which would make arccos return NaN.
    q0 = np.clip(q0, -1.0, 1.0)
    angle = 2 * np.arccos(q0)
    if angle < 1e-6:
        return np.zeros(3)
    # For a unit quaternion, ||(x, y, z)|| == sin(angle / 2).
    axis = np.array([q1, q2, q3]) / np.sin(angle / 2)
    return angle * axis


def rotation_vector_to_quaternion(rotation_vector):
    """Convert an axis-angle rotation vector to a unit quaternion (w, x, y, z).

    The input's norm is the rotation angle in radians; near-zero rotations
    map to the identity quaternion.
    """
    theta = np.linalg.norm(rotation_vector)
    # Near-zero rotation: avoid dividing by ~0 when normalizing the axis.
    if theta < 1e-6:
        return np.array([1.0, 0.0, 0.0, 0.0])
    half = 0.5 * theta
    unit_axis = rotation_vector / theta
    vector_part = np.sin(half) * unit_axis
    return np.concatenate(([np.cos(half)], vector_part))


def quaternion_multiply(q1, q2):
    """Return the Hamilton product q1 * q2 of two (w, x, y, z) quaternions."""
    a1, b1, c1, d1 = q1
    a2, b2, c2, d2 = q2
    return np.array([
        a1 * a2 - b1 * b2 - c1 * c2 - d1 * d2,  # scalar part
        a1 * b2 + b1 * a2 + c1 * d2 - d1 * c2,  # x
        a1 * c2 - b1 * d2 + c1 * a2 + d1 * b2,  # y
        a1 * d2 + b1 * c2 - c1 * b2 + d1 * a2,  # z
    ])

def connect_mqtt() -> mqtt_client.Client:
    """Create an MQTT client, connect it to `broker`:`port`, and return it.

    The connection outcome is reported asynchronously by `on_connect` once a
    network loop (e.g. `loop_forever`) is running; a refused connection is
    only printed here, not raised.

    Returns:
        The paho-mqtt Client instance (connection in progress).
    """
    def on_connect(client, userdata, flags, rc):
        if rc == 0:
            print("Connected to MQTT Broker!")
        else:
            # rc is a paho-mqtt connack return code; non-zero means refused.
            # (The original passed rc as a second print() argument, so the
            # literal "%d" was printed instead of the code.)
            print(f"Failed to connect, return code {rc}")

    client = mqtt_client.Client(client_id)
    client.on_connect = on_connect
    client.connect(broker, port)
    return client

# Publish the robot EE orientation (used to compute the direction of the
# force rendered on the Touch device) plus the current force/torque reading.
def publish(client: mqtt_client, ft: RobotiqFt300):
    """Continuously publish EE orientation and FT-sensor data over MQTT.

    Runs forever (intended for a daemon thread): every ~1 ms it publishes
    the EE rotation vector together with the 6-axis wrench on
    "robot_ee_pose&FT", and the wrench alone on "FT".
    """
    global pose
    while True:
        wrench = " ".join(str(ft.forceTorque[i]) for i in range(6))
        orientation = " ".join(str(pose[i]) for i in range(3, 6))
        client.publish("robot_ee_pose&FT", f"{orientation} {wrench}")
        client.publish("FT", wrench)
        time.sleep(0.001)


# def subscribe(client: mqtt_client):
#     def on_message(client, userdata, msg):
#         print(f"Received `{msg.payload.decode()}` from `{msg.topic}` topic")

#     client.subscribe(topic)
#     client.on_message = on_message

def subscribe(client: mqtt_client,env,classifer):
    """Register an MQTT handler that tele-operates `env` and records demos.

    Each message on `topic` is parsed as 7 floats: a 6-DoF delta pose from
    the haptic device followed by a button state. While the button is held,
    the translational part (clipped to [-1, 1]) drives `env.step` and every
    resulting transition is appended to the global `transitions` buffer.
    The client disconnects itself once `success_needed` episodes succeed,
    which lets the caller's `loop_forever()` return.

    Args:
        client: connected MQTT client to subscribe with.
        env: gym environment being tele-operated.
        classifer: reward classifier — currently unused (see the
            commented-out reward-prediction lines below).
    """
    def on_message(client, userdata, msg):
        
        # Shared with run(): demo counters, current observation, progress bar.
        global success_count
        global transitions
        global obs
        global pbar
        global count
        global total_count

        # Payload is a whitespace-separated list of floats:
        # [dx dy dz drx dry drz button]
        message = np.array(
            list(map(float, msg.payload.decode().split())), dtype=float)
        delta_pose = message[0:6]
        button_state = message[6]
        
        # Only step the environment while the stylus button is pressed.
        if(button_state):
            # delta_pose[0:3] = delta_pose[0:3] * 0.001*ratio_translation        # stylus displacement in mm; *0.001 converts to meters
            # Translation-only action, clipped to the env's [-1, 1] range.
            actions=np.array([delta_pose[0],delta_pose[1],delta_pose[2]])
            actions=np.clip(actions,[-1,-1,-1],[1,1,1])
            next_obs, rew, done, truncated, info = env.step(actions)
            # rew=classifer.predict(next_obs)
            # rew=1 if rew>=0.5 else 0
            count+=1
            # Refresh the live camera previews.
            cv2.imshow("Wrist Image", next_obs["wrist"])
            cv2.imshow("Side Image", next_obs["side"])
            cv2.waitKey(1)
            # Overwrite the status line in place (carriage return + pad).
            sys.stdout.write("\r"+ " " * 100 + "\r")
            sys.stdout.write(f"count:{count},rewards:{rew}")
            # sys.stdout.write(f",success_rate:{info['success_rate']},curriculum_difficulty:{info['curriculum_difficulty']}")
            sys.stdout.flush()

            # Deep-copy so later env/obs mutation cannot alias stored data.
            transition = copy.deepcopy(
                dict(
                    observations=obs,
                    actions=actions,
                    next_observations=next_obs,
                    rewards=rew,
                    masks=1.0 - done,
                    dones=done,
                )
            )

            transitions.append(transition)

            obs = next_obs

            # Episode finished successfully: count it and start a new one.
            if done :
                success_count += 1
                total_count += 1
                print(
                    # f"{rew}\tGot {success_count} successes of {total_count} trials. {success_needed} successes needed."
                    f"switch_demo:{info['switch_demo']}"
                )
                pbar.update(1)
                obs, _ = env.reset()
                cv2.imshow("Wrist Image", obs["wrist"])
                cv2.imshow("Side Image", obs["side"])
            
            # Time-limit hit without success: reset but record no success.
            if truncated :
                obs,_ = env.reset()

            # Enough demos collected: stop the MQTT loop in run().
            if success_count >= success_needed:
                pbar.close()
                client.disconnect()
                
        
        

    client.subscribe(topic)
    client.on_message = on_message


def run():
    """Collect tele-operated demonstrations and pickle them to `file_path`.

    Connects to the MQTT broker, builds the wrapped gym environment,
    registers the `subscribe` handler, and blocks in the MQTT network loop.
    The handler disconnects the client after `success_needed` successful
    episodes, which makes `loop_forever()` return; the accumulated
    `transitions` are then written to `file_path`.
    """
    global success_count
    global transitions
    global obs
    global pbar

    # Pre-initialize so the finally-block can tell what was actually set up.
    env = None
    pbar = None
    try:
        pbar = tqdm(total=success_needed)
        client = connect_mqtt()
        env = gym.make("serl/pih-v5", curriculum=True, reward_shape="sparse")
        env = SERLObsWrapper(env)
        env = RecordSuccessRateWrapper(env)
        classifier = Classifier(model_path=r"E:\study\serl_classifier\4\double_resnet_classifier_500_epochs_BCELoss()_0.0002_2024-05-16_23-43-19.pth")

        obs, _ = env.reset()
        cv2.imshow("Wrist Image", obs["wrist"])
        cv2.imshow("Side Image", obs["side"])
        cv2.waitKey(1)

        subscribe(client, env, classifier)
        # Blocks until the on_message handler calls client.disconnect().
        client.loop_forever()

        with open(file_path, "wb") as f:
            pkl.dump(transitions, f)
        print(
            f"saved {success_needed} demos and {len(transitions)} transitions to {file_path}"
        )

    except KeyboardInterrupt:
        print("KeyboardInterrupt")
    except Exception as e:
        print("Error:", e)
    finally:
        # Guarded cleanup: env/pbar may be unset if setup failed early, and
        # a NameError here would mask the original exception.
        if env is not None:
            env.reset()
            env.close()
        if pbar is not None:
            pbar.close()
        cv2.destroyAllWindows()
        sys.exit(0)

    


# Script entry point: run the demo-collection loop.
if __name__ == "__main__":
    run()

