from franka_servo import FrankaPosServo
from slip_detector import SlipDeltaMeter
from dexhand_client import DexHandClient
from utils import ExpDataProcessor, StateManager, ResultManager
from dexhand_client.PyTac3D import Tac3D
from actor_critic import NaiveActor, PIActor, SlideMoldPIActor, RLActor
import time
import numpy as np
import os
import argparse
from threading import Thread

Tac3D_SN1 = "HDL1-GWH0024"  # serial number of the first Tac3D tactile sensor
Tac3D_SN2 = "HDL1-GWH0021"  # serial number of the second Tac3D tactile sensor
tac3d_gf_id1 = 1  # force-sensor (grasp-force) index corresponding to this Tac3D sensor
tac3d_gf_id2 = 0  # force-sensor (grasp-force) index corresponding to this Tac3D sensor


class ExpExecuter:
    """Runs grasping experiments end to end.

    Drives the dexterous hand and Franka arm, records tactile data from two
    Tac3D sensors, feeds the RL actor each control period, and stores
    per-episode results and training artifacts.
    """

    def __init__(self, SN1, SN2, gf_id1, gf_id2, episode_num, speed_range=None, online_training=False):
        """
        Initializes the ExpExecuter with necessary components for the experiment.

        Args:
            SN1 (str): Sensor ID for the first Tac3D sensor.
            SN2 (str): Sensor ID for the second Tac3D sensor.
            gf_id1 (int): Grasp force ID for the first Tac3D sensor.
            gf_id2 (int): Grasp force ID for the second Tac3D sensor.
            episode_num (int): Number of episodes to run in the experiment.
            speed_range (list[float] | None): [first, last] grasp speed in m/s,
                interpolated linearly across episodes. Defaults to [0.002, 0.002].
            online_training (bool): Flag indicating if the processor is used for online training.
        """
        # Avoid the shared-mutable-default pitfall of `speed_range=[0.002, 0.002]`.
        if speed_range is None:
            speed_range = [0.002, 0.002]

        self.online_training = online_training
        self.Tac3D_SN1 = SN1
        self.Tac3D_SN2 = SN2
        self.SN_gf_dict = {SN1: gf_id1, SN2: gf_id2}
        self.record_flag = False  # gates the Tac3D callback: only record during an episode
        self.tac3d = Tac3D(recvCallback=self.Tac3DRecvCallback)
        self.dexhand_client = DexHandClient(ip="192.168.2.100", port=60031)
        self.dexhand_client.start_server()
        self.franka_pos_servo = FrankaPosServo()

        self.exp_data_processor = ExpDataProcessor(SN1, SN2, self.SN_gf_dict)
        self.state_manager = StateManager(SN1, SN2)
        self.slip_detector = SlipDeltaMeter(online_training=online_training)
        self.actor = RLActor()
        self.result_manager = ResultManager()

        self.act_frequency = 30  # Hz, the frequency at which the actor will act
        self.speed_range = speed_range  # Range of grasping speed
        self.episode_num = episode_num  # Number of episodes to run in the experiment

        self.control_flag = False
        self.init_pos = None  # robot start pose, captured in initialize()
        self.now_idx = None  # index of the episode currently running

        self.report_period = 1  # log once every `report_period` control periods
        self.report_cnt = 0
        self.contact_pos = None  # hand position (mm) just before contact, reused across episodes

    def Tac3DRecvCallback(self, frame, param):
        """Per-frame Tac3D callback: records tactile data and feeds the actor state.

        Ignored unless ``record_flag`` is set (i.e. an episode is running).
        """
        if not self.record_flag:
            return
        # Sensor serial number identifies which fingertip this frame came from.
        SN = frame["SN"]
        time_stamp = time.time()
        P = frame.get("3D_Positions")
        D = frame.get("3D_Displacements")
        F = frame.get("3D_Forces")
        tacdata = np.concatenate((P, D, F), axis=1)  # Concatenate position, displacement, and force data
        nowforce = self.dexhand_client.hand_info.now_force
        predicted_delta = self.slip_detector.data_to_delta(P, D, F)

        self.exp_data_processor.add_data(SN, time_stamp, tacdata, predicted_delta, nowforce[self.SN_gf_dict[SN]])

        self.state_manager.update_contact_area(SN, F)
        self.state_manager.update_tangential_force(SN, F)
        self.state_manager.update_contact_center(SN, F)

        self.actor.update_state(self.SN_gf_dict[SN],
                                delta=np.clip(predicted_delta, 0, 1),
                                fg=self.dexhand_client.hand_info.goal_force,
                                dft=self.state_manager.tangential_force_rate[SN],
                                ft=self.state_manager.tangential_force[SN],
                                ccx=self.state_manager.contact_center[SN][0] if self.state_manager.contact_center[SN] is not None else 0,
                                ccy=self.state_manager.contact_center[SN][1] if self.state_manager.contact_center[SN] is not None else 0,
                                )

    def initialize(self):
        """
        Prepares the experiment by initializing necessary components and starting the Tac3D sensor.
        """
        self.dexhand_client.acquire_hand()
        self.dexhand_client.clear_hand_error()
        self.dexhand_client.set_home()
        self.dexhand_client.calibrate_force_zero()
        self.tac3d.calibrate(self.Tac3D_SN1)
        self.tac3d.calibrate(self.Tac3D_SN2)
        # Remember the arm's start pose so each episode can return to it.
        self.init_pos = self.franka_pos_servo.robot_position

        self.result_manager.init_result(self.episode_num)

    def execute(self, idx):
        """Runs episode ``idx``: resets parameters, performs the grasp, then logs results."""
        self._reset_exp_param(idx)

        st_time = time.time()
        self._actual_execute()
        self.exp_param["used_time"] = time.time() - st_time

        self._process_exp_data()

    def save_exp_data(self, goal_dir="resultlib"):
        """Writes the config summary, episode results, and model artifacts.

        Args:
            goal_dir (str): Parent directory under which a timestamped result
                folder is created.
        """
        time_str = time.strftime("%Y%m%d_%H%M%S")
        result_dir = os.path.join(goal_dir, f"test_result_{time_str}")
        os.makedirs(result_dir, exist_ok=True)
        print(f"[ExpExecuter] Saving test results to {result_dir}")

        episodes_num = self.result_manager.episodes_num
        # Guard against ZeroDivisionError when no episodes were recorded.
        success_rate = self.result_manager.success_cnt / episodes_num if episodes_num else 0.0

        config_path = os.path.join(result_dir, "config.txt")
        with open(config_path, 'w') as f:
            f.write(f"Used model in: {self.slip_detector.res_dir}\n")
            f.write(f"Controller: {self.actor.__class__.__name__}\n")
            f.write(f"Controller Parameters: {self.actor.params}\n")
            f.write(f"Act Frequency: {self.act_frequency} Hz\n")
            f.write(f"Grasping Speed Range: {self.speed_range[0]} m/s to {self.speed_range[1]} m/s\n")
            f.write(f"Experiment Episodes: {episodes_num}\n")
            f.write(f"Success Count: {self.result_manager.success_cnt}\n")
            f.write(f"Success Rate: {success_rate:.2%}\n")
            f.write(f"Save new data to {self.exp_data_processor.dataset_name}\n")

        self.result_manager.save_result(result_dir)
        self.result_manager.clear_result()
        self.actor.save_train_results(result_dir)

        try:
            self.actor.save_model()
        except Exception as e:
            # Best-effort save; a failed model dump must not lose the results above.
            print(f"[ExpExecuter] Error saving model: {e}")

    def _reset_exp_param(self, idx):
        """Resets per-episode parameters and restarts the actor.

        The grasp speed is linearly interpolated from ``speed_range[0]`` (first
        episode) to ``speed_range[1]`` (last episode).
        """
        self.now_idx = idx
        if self.episode_num > 1:
            # Linear interpolation of grasp speed across episodes.
            grasp_speed = (self.speed_range[0] * (self.episode_num - 1 - idx)
                           + self.speed_range[1] * idx) / (self.episode_num - 1)
        else:
            # Single episode: nothing to interpolate (avoids division by zero).
            grasp_speed = self.speed_range[0]
        self.exp_param = {
            "exp_result": None,
            "used_time": None,
            "grasp_speed": grasp_speed,
        }
        self.actor.restart()

    def _process_exp_data(self):
        """Computes episode MAE metrics, stores results, and saves/clears the episode data."""
        mae_dict, est_mae, track_mae = self.exp_data_processor.get_mae(self.actor.goal_delta, self.exp_param["exp_result"])
        self.result_manager.result_push_in(
            episode_idx=self.now_idx, **self.exp_param, **mae_dict
        )

        data_dir = self.exp_data_processor.save_data(self.exp_param["exp_result"])
        self.exp_data_processor.add_actor_data(data_dir, self.actor.pass_exp_data())
        self.exp_data_processor.clear_data()
        print(f"[ExpExecuter] Experiment {self.now_idx} completed. Result: {self.exp_param['exp_result']}, "
                f"track_mae: {track_mae:.4f}, estimation_mae: {est_mae:.4f} ")

    def _actual_execute(self):
        """Core control loop for a single grasp episode.

        Establishes contact, lifts the object while the actor servos the grasp
        force, flags failure on contact loss or success on a finished lift,
        then releases the object, trains the actor in the background, and
        returns the hand and arm to their start positions.
        """
        self.dexhand_client.clear_hand_error()
        self.dexhand_client.contact()
        if self.contact_pos is None:
            self.contact_pos = self.dexhand_client.hand_info.now_pos - 5  # mm
        self.record_flag = True
        self.dexhand_client.grasp(1, 1)  # Ready to grasp, but not actually grasping yet

        # Use contact area to determine if contact is lost
        start_contact_center1 = self.state_manager.contact_center[self.Tac3D_SN1]
        start_contact_center2 = self.state_manager.contact_center[self.Tac3D_SN2]
        lost_cnt = 0

        def check_lost_contact():
            # Contact counts as lost after 5 consecutive frames with no contact
            # center, or when either center drifts more than 4 mm from its start.
            nonlocal start_contact_center1, start_contact_center2, lost_cnt
            now_contact_center1 = self.state_manager.contact_center[self.Tac3D_SN1]
            now_contact_center2 = self.state_manager.contact_center[self.Tac3D_SN2]
            if now_contact_center1 is None or now_contact_center2 is None:
                lost_cnt += 1
                return lost_cnt >= 5
            lost_cnt = 0
            if start_contact_center1 is None or start_contact_center2 is None:
                # Late first contact: latch the initial centers once available.
                start_contact_center1 = now_contact_center1 if start_contact_center1 is None else start_contact_center1
                start_contact_center2 = now_contact_center2 if start_contact_center2 is None else start_contact_center2
                return False

            if np.linalg.norm(start_contact_center1 - now_contact_center1) > 4 or np.linalg.norm(start_contact_center2 - now_contact_center2) > 4:
                return True

            return False

        # Lift 3 cm at the episode's interpolated grasp speed, non-blocking.
        cmd = self.franka_pos_servo.gen_rela_move_cmd([0.0, 0.0, 0.03], max_speed=self.exp_param["grasp_speed"])
        self.franka_pos_servo.non_blocking_move(cmd)
        while True:
            goal_force = self.actor.act()
            self._try_push_log(goal_force=goal_force, delta=np.mean(self.actor.state["delta"]))
            self.dexhand_client.force_servo(goal_force)

            time.sleep(1 / self.act_frequency)

            done = False
            if self.franka_pos_servo.is_cmd_finished(cmd.cmd_id):
                done = True
                self.exp_param["exp_result"] = True
            if check_lost_contact():
                done = True
                print("[ExpExecuter] Experiment stopped due to contact loss during grasping. Grasp failed.")
                self.exp_param["exp_result"] = False

            state = self.actor.pack_state()
            self.actor.try_store_transition(state, done, self.exp_param["exp_result"])

            if done:
                break

        if self.exp_param["exp_result"] is True:
            self._slow_release_object(release_time=3.0)
            self.result_manager.success_cnt += 1

        self.record_flag = False

        # Train the actor in the background while the hardware returns home.
        training_thread = Thread(target=self.actor.train)
        training_thread.start()

        self.dexhand_client.clear_hand_error()
        while self.dexhand_client.hand_info.error_flag:
            time.sleep(0.001)  # yield the CPU instead of busy-spinning
        self.dexhand_client.pos_goto(self.contact_pos)
        self.franka_pos_servo.blocking_move(self.franka_pos_servo.gen_abs_move_cmd(self.init_pos))

        training_thread.join()  # Wait for the training thread to finish

    def _slow_release_object(self, release_time=5.0):
        """Ramps the grasp force linearly down to 1.0 over ``release_time`` seconds.

        Stops early once the average tangential force falls below 70% of its
        starting value, i.e. the object has settled on its support.
        """
        st_time = time.time()
        elapsed_time = 0.0
        st_force = self.dexhand_client.hand_info.avg_force
        st_tangential_force = self.state_manager.avg_tangential_force

        def get_new_goal_force():
            # Linear ramp from st_force down to 1.0 across release_time.
            nonlocal st_force, elapsed_time
            elapsed_time = time.time() - st_time
            return st_force - elapsed_time * (st_force - 1.0) / release_time

        while elapsed_time < release_time:
            now_tangential_force = self.state_manager.avg_tangential_force
            if now_tangential_force < st_tangential_force * 0.7:
                print("[ExpExecuter] Experiment stopped due to significant tangential force drop. Grasp succeeded.")
                break

            goal_force = get_new_goal_force()
            self._try_push_log(goal_force=goal_force)
            self.dexhand_client.force_servo(goal_force)
            time.sleep(1 / self.act_frequency)

    def _try_push_log(self, **kwargs):
        """Prints a throttled one-line status log, once every ``report_period`` calls.

        All keyword values must be numeric (they are formatted with ``:.3f``).
        """
        self.report_cnt += 1
        if self.report_cnt >= self.report_period:
            self.report_cnt = 0
            log_str = f"[ExpExecuter] Episode {self.now_idx} - "
            for key, value in kwargs.items():
                log_str += f"{key}: {value:.3f}, "
            print(log_str)



if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Execute the grasping experiment with force controller.")
    parser.add_argument("--episode_num", type=int, default=20, help="Number of episodes to run in the experiment.")
    args = parser.parse_args()

    exp_executer = ExpExecuter(Tac3D_SN1, Tac3D_SN2, tac3d_gf_id1, tac3d_gf_id2, args.episode_num, online_training=False)
    exp_executer.initialize()
    
    for idx in range(args.episode_num):
        exp_executer.execute(idx)

    # print(f"[ExpExecuter] Error during execution of episode {idx}: {e}")
    # exp_executer.exp_param["exp_result"] = False
    # exp_executer.result_manager.result_push_in(episode_idx=idx, **exp_executer.exp_param)
    # exp_executer.exp_data_processor.clear_data()

    exp_executer.save_exp_data()
