from frankaservo import FrankaPosServo
from slip_detector import DeltaEstimator
from dexhand_client import DexHandClient
from utils import ExpDataProcessor, StateManager, ResultManager
from dexhand_client.PyTac3D import Tac3D
from actor_critic import NaiveActor, PIActor, SlideMoldPIActor, RLActor
import time
import numpy as np
import os
import argparse
from threading import Thread

# Serial numbers of the two Tac3D tactile sensors mounted on the gripper fingers
Tac3D_SN1 = "HDL1-GWH0024"
Tac3D_SN2 = "HDL1-GWH0021"
tac3d_gf_id1 = 1  # grasp-force sensor index corresponding to the first Tac3D sensor
tac3d_gf_id2 = 0  # grasp-force sensor index corresponding to the second Tac3D sensor


class ExpExecuter:
    """Drives a campaign of grasp-and-lift experiments.

    Wires together the Tac3D tactile sensors, the DexHand gripper, the
    Franka arm position servo, the slip detector and the force-control
    actor; runs a sequence of episodes, logs per-episode data and
    persists aggregated results.
    """

    def __init__(
        self,
        SN1,
        SN2,
        gf_id1,
        gf_id2,
        episode_num,
        obj,
        lift_distance,
        speed_range=(0.002, 0.01),
        online_training=False,
    ):
        """
        Initializes the ExpExecuter with necessary components for the experiment.

        Args:
            SN1 (str): Sensor ID for the first Tac3D sensor.
            SN2 (str): Sensor ID for the second Tac3D sensor.
            gf_id1 (int): Grasp force ID for the first Tac3D sensor.
            gf_id2 (int): Grasp force ID for the second Tac3D sensor.
            episode_num (int): Number of episodes to run in the experiment.
            obj (str): Name of the object to grasp.
            lift_distance (float): Distance (m) to lift the object during grasping.
            speed_range (sequence of float): (min, max) grasping speed in m/s.
                The default is a tuple (not a list) to avoid the shared
                mutable-default-argument pitfall.
            online_training (bool): Flag indicating if the processor is used for online training.
        """

        self.Tac3D_SN1 = SN1
        self.Tac3D_SN2 = SN2
        self.SN_gf_dict = {SN1: gf_id1, SN2: gf_id2}

        # The tactile callback only records/acts while record_flag is True.
        self.record_flag = False
        self.tac3d = Tac3D(recvCallback=self.Tac3DRecvCallback)
        self.dexhand_client = DexHandClient(ip="192.168.2.100", port=60031)
        self.dexhand_client.start_server()
        self.franka_pos_servo = FrankaPosServo(dynamics_factor=0.7)

        self.exp_data_processor = ExpDataProcessor(SN1, SN2, self.SN_gf_dict, obj)
        self.state_manager = StateManager(SN1, SN2)
        self.slip_detector = DeltaEstimator(online_training=online_training)
        self.actor = SlideMoldPIActor()
        self.result_manager = ResultManager()

        self.obj = obj  # Object to grasp
        self.lift_distance = lift_distance  # Distance to lift the object during grasping
        self.act_frequency = 30  # Hz, the frequency at which the actor will act
        self.speed_range = speed_range  # Range of grasping speed
        self.episode_num = episode_num  # Number of episodes to run in the experiment
        self.online_training = online_training  # Whether to use online training

        self.control_flag = False
        self.init_pos = None  # Franka start position, captured in initialize()
        self.now_idx = None  # index of the episode currently running

        self.report_period = 1  # log once every `report_period` calls to _try_push_log
        self.report_cnt = 0
        self.contact_pos = None  # hand position (mm) recorded at first contact

    def Tac3DRecvCallback(self, frame, param):
        """Per-frame tactile callback.

        While recording is enabled: stores the raw tactile frame, runs slip
        estimation, updates contact statistics and feeds the actor's state.
        """
        if not self.record_flag:
            return
        # Serial number identifies which of the two sensors sent this frame
        SN = frame["SN"]
        time_stamp = time.time()
        P = frame.get("3D_Positions")
        D = frame.get("3D_Displacements")
        F = frame.get("3D_Forces")
        tacdata = np.concatenate((P, D, F), axis=1)  # Concatenate position, displacement, and force data
        nowforce = self.dexhand_client.hand_info.now_force
        st_time = time.time()
        predicted_delta = self.slip_detector.data_to_delta(P, D, F)
        elapsed_time = time.time() - st_time  # inference latency, used by the debug print below
        # print(f'elapsed time for inference: {elapsed_time:.4f} seconds predicted delta: {predicted_delta:.4f}')

        self.exp_data_processor.add_data(
            SN,
            time_stamp,
            tacdata,
            predicted_delta,
            nowforce[self.SN_gf_dict[SN]],
            self.dexhand_client.hand_info.goal_force,
        )

        self.state_manager.update_contact_area(SN, F)
        self.state_manager.update_tangential_force(SN, F)
        self.state_manager.update_contact_center(SN, F)

        self.actor.update_state(
            self.SN_gf_dict[SN],
            delta=np.clip(predicted_delta, 0, 1),
            fg=self.dexhand_client.hand_info.avg_force,
            dft=self.state_manager.tangential_force_rate[SN],
            ft=self.state_manager.tangential_force[SN],
            ccx=(self.state_manager.contact_center[SN][0] if self.state_manager.contact_center[SN] is not None else 0),
            ccy=(self.state_manager.contact_center[SN][1] if self.state_manager.contact_center[SN] is not None else 0),
        )

    def initialize(self):
        """
        Prepares the experiment by initializing necessary components and starting the Tac3D sensor.

        Homes and zeroes the hand, calibrates both tactile sensors, records
        the Franka start position and initializes the result container.
        """
        self.dexhand_client.acquire_hand()
        self.dexhand_client.clear_hand_error()
        self.dexhand_client.set_home()
        self.dexhand_client.calibrate_force_zero()
        self.tac3d.calibrate(self.Tac3D_SN1)
        self.tac3d.calibrate(self.Tac3D_SN2)
        self.init_pos = self.franka_pos_servo.robot_position

        self.result_manager.init_result(self.episode_num, self.obj)

    def execute(self, idx):
        """Runs one full episode: reset parameters, grasp+lift, then post-process.

        Args:
            idx (int): Zero-based episode index.
        """
        self._reset_exp_param(idx)

        st_time = time.time()
        self._actual_execute()
        self.exp_param["used_time"] = time.time() - st_time

        self._process_exp_data()

    def save_exp_data(self, goal_dir="resultlib"):
        """Persists config summary, results and (best-effort) the trained model.

        Creates goal_dir/test_result_<timestamp>/ and writes a human-readable
        config.txt plus the result manager's and actor's artifacts.

        Args:
            goal_dir (str): Parent directory for the result folder.
        """
        time_str = time.strftime("%Y%m%d_%H%M%S")
        save_dir = os.path.join(goal_dir, f"test_result_{time_str}")
        # exist_ok avoids the check-then-create race of the old exists()/makedirs pair
        os.makedirs(save_dir, exist_ok=True)
        print(f"[ExpExecuter] Saving test results to {save_dir}")

        # Guard the success-rate division against a zero episode count.
        episodes_num = max(self.result_manager.episodes_num, 1)

        config_path = os.path.join(save_dir, "config.txt")
        with open(config_path, "w") as f:
            f.write(f"Used model in: {self.slip_detector.res_dir}\n")
            f.write(f"Controller: {self.actor.__class__.__name__}\n")
            f.write(f"Controller Parameters: {self.actor.params}\n")
            f.write(f"Act Frequency: {self.act_frequency} Hz\n")
            f.write(f"Grasping Speed Range: {self.speed_range[0]} m/s to {self.speed_range[1]} m/s\n")
            f.write(f"Experiment Episodes: {self.result_manager.episodes_num}\n")
            f.write(f"Object: {self.obj}\n")
            f.write(f"Success Count: {self.result_manager.success_cnt}\n")
            f.write(f"Success Rate: {self.result_manager.success_cnt / episodes_num:.2%}\n")
            f.write(f"Save new data to {self.exp_data_processor.dataset_name}\n")

        self.result_manager.save_result(save_dir)
        self.result_manager.clear_result()
        self.actor.save_train_results(save_dir)

        try:
            # Best-effort: some actors (e.g. non-RL controllers) may not have a model.
            self.actor.save_model()
        except Exception as e:
            print(f"[ExpExecuter] Error saving model: {e}")

    def _reset_exp_param(self, idx):
        """Resets per-episode parameters and restarts the actor.

        The grasp speed is linearly interpolated over the episodes from
        speed_range[0] (first episode) to speed_range[1] (last episode).
        Fix: the original formula divided by (episode_num - 1) and raised
        ZeroDivisionError for a single-episode run; a single episode now
        uses the lower bound of the speed range.

        Args:
            idx (int): Zero-based episode index.
        """
        self.now_idx = idx
        if self.episode_num > 1:
            frac = idx / (self.episode_num - 1)
        else:
            frac = 0.0  # single episode: use the minimum grasp speed
        self.exp_param = {
            "object": self.obj,
            "exp_result": None,
            "used_time": None,
            "grasp_speed": self.speed_range[0] + frac * (self.speed_range[1] - self.speed_range[0]),
        }
        self.actor.restart()

    def _process_exp_data(self):
        """Computes episode error metrics, records the result and archives data."""
        mae_dict, est_mae, track_mae = self.exp_data_processor.get_mae(
            self.actor.goal_delta, self.exp_param["exp_result"]
        )
        self.result_manager.result_push_in(episode_idx=self.now_idx, **self.exp_param, **mae_dict)

        data_dir = self.exp_data_processor.save_data(self.exp_param["exp_result"])
        self.exp_data_processor.add_actor_data(data_dir, self.actor.pass_exp_data())
        self.exp_data_processor.clear_data()
        print(
            f"[ExpExecuter] Experiment {self.now_idx} completed. Result: {self.exp_param['exp_result']}, "
            f"track_mae: {track_mae:.4f}, estimation_mae: {est_mae:.4f} "
        )

    def _actual_execute(self):
        """
        The main execution function for the experiment.
        It performs the following steps:
            1. Checks if the experiment should continue running.
            2. Clears any existing hand errors.
            3. Initiates contact with the object.
            4. Prepares for grasping by setting the contact position.
            5. Monitors the contact area to determine if contact is lost.
            6. Executes the grasping action with the Franka robot.
            7. Checks if the grasping was successful or if contact was lost.
            8. If successful, performs a slow release of the object.
            9. Trains the actor in a separate thread.
            10. Moves the hand back to the contact position after grasping.
            11. Waits for the training thread to finish.
        """
        self._check_in_run()  # Check if the experiment should continue running
        self.dexhand_client.clear_hand_error()
        self.dexhand_client.contact(preload_force=0.5)
        if self.contact_pos is None:
            self.contact_pos = max(0.1, self.dexhand_client.hand_info.now_pos - 5)  # mm
        self.record_flag = True
        self.dexhand_client.grasp(0.5, 0.5)  # Ready to grasp, but not actually grasping yet

        # Use contact area to determine if contact is lost
        start_contact_center1 = self.state_manager.contact_center[self.Tac3D_SN1]
        start_contact_center2 = self.state_manager.contact_center[self.Tac3D_SN2]
        lost_cnt = 0

        def check_lost_contact():
            # Contact is "lost" after 5 consecutive frames without a contact
            # center, or when either center drifts more than 4 units from its
            # starting position (the object slipped in the grip).
            nonlocal start_contact_center1, start_contact_center2, lost_cnt
            now_contact_center1 = self.state_manager.contact_center[self.Tac3D_SN1]
            now_contact_center2 = self.state_manager.contact_center[self.Tac3D_SN2]
            if now_contact_center1 is None or now_contact_center2 is None:
                lost_cnt += 1
                if lost_cnt >= 5:
                    return True
                else:
                    return False
            lost_cnt = 0
            if start_contact_center1 is None or start_contact_center2 is None:
                # Late first contact: adopt the current centers as the baseline.
                start_contact_center1 = now_contact_center1 if start_contact_center1 is None else start_contact_center1
                start_contact_center2 = now_contact_center2 if start_contact_center2 is None else start_contact_center2
                return False

            if (
                np.linalg.norm(start_contact_center1 - now_contact_center1) > 4
                or np.linalg.norm(start_contact_center2 - now_contact_center2) > 4
            ):
                return True

            return False

        cmd = self.franka_pos_servo.gen_rela_move_cmd(
            [0.0, 0.0, self.lift_distance], max_speed=self.exp_param["grasp_speed"]
        )
        goal_pos = self.franka_pos_servo.non_blocking_move(cmd)
        check_freq = 30  # re-check the external run flag once per `check_freq` iterations
        check_cnt = 0
        while True:
            check_cnt += 1
            if check_cnt >= check_freq:
                check_cnt = 0
                self._check_in_run()  # Check if the experiment should continue running

            goal_force = self.actor.act()
            self._try_push_log(
                goal_force=goal_force,
                delta=np.mean(self.actor.state["delta"]),
                goal_pos=goal_pos[2],
                now_pos=self.franka_pos_servo.robot_position[2],
            )
            self.dexhand_client.force_servo(goal_force)
            time.sleep(1 / self.act_frequency)

            done = False
            if self.franka_pos_servo.is_cmd_finished(goal_pos):
                done = True
                self.exp_param["exp_result"] = True
            if check_lost_contact():
                done = True
                print("[ExpExecuter] Experiment stopped due to contact loss during grasping. Grasp failed.")
                self.exp_param["exp_result"] = False

            self.actor.try_store_transition(self.actor.pack_state(), done, self.exp_param["exp_result"])
            if done:
                break

        if self.exp_param["exp_result"] is True:
            self._slow_release_object(release_time=3.0)
            self.result_manager.success_cnt += 1

        self.record_flag = False

        # Train the actor in a background thread while the hardware resets.
        training_thread = Thread(target=self.actor.train)
        training_thread.start()

        self.dexhand_client.clear_hand_error()
        while self.dexhand_client.hand_info.error_flag:
            time.sleep(0.01)  # yield instead of burning a core while waiting for the error to clear
        self.dexhand_client.pos_goto(self.contact_pos)
        self.franka_pos_servo.robot.join_motion()
        self.franka_pos_servo.blocking_move(self.franka_pos_servo.gen_abs_move_cmd(self.init_pos))

        training_thread.join()  # Wait for the training thread to finish

    def _slow_release_object(self, release_time=6.0):
        """Gradually ramps the grip force down to 0.35 over `release_time` seconds.

        Stops early once the average tangential force drops below 70% of its
        starting value, which indicates the object has settled (grasp succeeded).

        Args:
            release_time (float): Release duration in seconds.
        """
        st_time = time.time()
        elapsed_time = 0.0
        st_force = self.dexhand_client.hand_info.avg_force
        st_tangential_force = self.state_manager.avg_tangential_force

        def get_new_goal_force():
            # Linear ramp from st_force down to 0.35 over release_time.
            nonlocal elapsed_time
            elapsed_time = time.time() - st_time
            return st_force - elapsed_time * (st_force - 0.35) / release_time

        while elapsed_time < release_time:
            now_tangential_force = self.state_manager.avg_tangential_force
            if now_tangential_force < st_tangential_force * 0.7:
                print("[ExpExecuter] Experiment stopped due to significant tangential force drop. Grasp succeeded.")
                break

            goal_force = get_new_goal_force()
            self._try_push_log(goal_force=goal_force)
            self.dexhand_client.force_servo(goal_force)
            time.sleep(1 / self.act_frequency)

    def _try_push_log(self, **kwargs):
        """Rate-limited console logger: prints kwargs every `report_period` calls."""
        self.report_cnt += 1
        if self.report_cnt >= self.report_period:
            self.report_cnt = 0
            log_str = f"[ExpExecuter] Episode {self.now_idx} - "
            for key, value in kwargs.items():
                if isinstance(value, np.ndarray):
                    value = value.tolist()  # Convert numpy arrays to lists for better readability
                if isinstance(value, float):
                    value = f"{value:.3f}"  # Format float values to 3 decimal places
                log_str += f"{key}: {value}, "
            print(log_str)

    def _check_in_run(self):
        """Raises RuntimeError unless run.txt's first line starts with 'run'.

        Acts as an external kill switch the operator can flip mid-experiment.
        """
        with open("run.txt", "r") as f:
            flag = f.readline().strip()  # renamed from `str` to avoid shadowing the builtin
            if not flag.startswith("run"):
                raise RuntimeError("[ExpExecuter] Experiment stopped by user. Run flag is not 'run'.")


if __name__ == "__main__":
    # Command-line interface for launching an experiment campaign.
    arg_parser = argparse.ArgumentParser(description="Execute the grasping experiment with force controller.")
    arg_parser.add_argument(
        "--episode_num",
        type=int,
        default=20,
        help="Number of episodes to run in the experiment.",
    )
    arg_parser.add_argument(
        "--obj",
        type=str,
        default="random",
        help="Object to grasp, default is 'random'.",
    )
    arg_parser.add_argument(
        "--lift_distance",
        type=float,
        default=0.03,
        help="Distance to lift the object during grasping, default is 0.03 m.",
    )
    cli_args = arg_parser.parse_args()

    # Build the executer against the module-level sensor configuration.
    exp_executer = ExpExecuter(
        Tac3D_SN1,
        Tac3D_SN2,
        tac3d_gf_id1,
        tac3d_gf_id2,
        cli_args.episode_num,
        cli_args.obj,
        cli_args.lift_distance,
        online_training=False,
    )
    exp_executer.initialize()

    try:
        for idx in range(cli_args.episode_num):
            exp_executer.execute(idx)
    except Exception as err:
        # On any failure: dump the traceback, mark the episode as failed,
        # discard partial episode data and open the hand.
        import traceback
        traceback.print_exc()
        print(f"[ExpExecuter] Error during execution of episode {idx}: {err}")
        exp_executer.exp_param["exp_result"] = False
        exp_executer.result_manager.result_push_in(episode_idx=idx, **exp_executer.exp_param)
        exp_executer.exp_data_processor.clear_data()
        exp_executer.dexhand_client.pos_goto(0)

    exp_executer.save_exp_data()
