import copy
import time
from datetime import datetime
import random
from typing import Any, Dict, Optional
import numpy as np
import gymnasium as gym
from gymnasium import spaces
import cv2
import rtde_control
import rtde_receive
import sys,os

from gym_pih.hardware.asyn_realsense import Camera
from gym_pih.hardware.KWFT import ftsensor as FT300
from gym_pih.hardware.pid import PIDForceController
# from gym_pih.algorithm.depth_image_normal import compute_plane_normal

try:
    sys.path.append('/home/wk/RL-sim/FastSAM')
    from point2mask import Point2Mask
except Exception as e:
    print(e)
    print('需要FastSAM')

class ActImageEnhancement(gym.Env):
    """Real-robot peg-in-hole gymnasium environment driven over RTDE.

    The agent outputs small Cartesian TCP displacements (3-D actions in
    [-1, 1], scaled by ``action_range``); a PID force controller superimposes
    a compliance correction computed from the FT300 force/torque sensor while
    the robot servos toward the hole.  Episode success is signalled by
    digital input 0 on the robot controller.

    Two observation modes:
      * ``collect_data_mode=True``  -- raw 128x128 color + depth crops
        (for dataset collection).
      * ``collect_data_mode=False`` -- a FastSAM color-mask image plus the
        segmented area scalar.
    """

    def __init__(self,
                 target_pos: Optional[np.ndarray] = None,
                 collect_data_mode: bool = False,
                 render: bool = True,):
        """
        :param target_pos: hole TCP pose ``[x, y, z, rx, ry, rz]``; defaults
            to the calibrated pose below.
        :param collect_data_mode: if True, observations carry raw color and
            depth image crops; otherwise a FastSAM mask image plus its area.
        :param render: if True, open live OpenCV preview windows.
        """
        # Avoid a mutable (ndarray) default argument: build the default here.
        if target_pos is None:
            target_pos = np.array(
                [0.47745133600881506, -0.0871746888476016, 0.1519502484440497,
                 0.008667017107691103, -3.127866260952955, 0.011422669020294273])
        # Max translation per action, metres.  Value is 0.0001 m = 0.1 mm;
        # the original comment claimed 1 mm -- TODO confirm intended scale.
        self.action_range = 0.001/10
        self.initial_error = 0.01   # max XY offset of the start pose from the hole, metres
        self.ministep_num = 1       # number of servo sub-steps each action is split into

        self.init_pose = np.zeros(6)
        self.target_pos = target_pos.copy()
        self.current_pose = np.zeros(6)
        self.terminated = False

        # RTDE servoL parameters.
        self.ip = "192.168.3.140"
        self.velocity = 0.1
        self.acceleration = 0.05
        self.dt = 1.0/125  # 8 ms control period
        self.lookahead_time = 0.1
        self.gain = 300

        self.action_space = spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32)
        self.collect_data_mode = collect_data_mode
        if self.collect_data_mode:
            # Raw sensor observations for offline data collection.
            self.observation_space = gym.spaces.Dict(
                {
                    "state": gym.spaces.Dict(
                        {
                            "displacement": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32),
                        }
                    ),
                    "images": gym.spaces.Dict(
                        {
                            "color": gym.spaces.Box(
                                low=0,
                                high=255,
                                shape=(128, 128, 3),
                                dtype=np.uint8,
                            ),
                            "depth": gym.spaces.Box(
                                low=0,
                                high=65535,
                                shape=(128, 128),
                                dtype=np.uint16,
                            ),
                        }
                    ),
                }
            )
        else:
            # Segmentation-based observations used during policy training.
            self.observation_space = gym.spaces.Dict(
                {
                    "state": gym.spaces.Dict(
                        {
                            "displacement": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32),
                            "area": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(1,), dtype=np.float32),
                        }
                    ),
                    "images": gym.spaces.Dict(
                        {
                            "wrist": gym.spaces.Box(
                                low=0,
                                high=255,
                                shape=(128, 128, 3),
                                dtype=np.uint8,
                            ),
                        }
                    ),
                }
            )

        self.camera = Camera(640, 480, 60)
        self.camera.Open()
        self.ft = FT300(True)
        # PID gains are scaled by the per-sub-step action magnitude; goalForce
        # is the contact-force set-point in newtons (negative = pressing).
        self.action_controller = PIDForceController(kp=self.action_range/self.ministep_num/4,
                                                    ki=self.action_range/self.ministep_num/100/150,
                                                    kd=self.action_range/self.ministep_num/4,
                                                    goalForce=-15, downward=False)
        self.rtde_c = rtde_control.RTDEControlInterface(self.ip)
        self.rtde_r = rtde_receive.RTDEReceiveInterface(self.ip)
        self.mask_maker = Point2Mask()
        # NOTE(review): this bool attribute shadows gym.Env.render(); renaming
        # it would change the public interface, so it is kept as-is.
        self.render = render
        if self.render:
            cv2.namedWindow("color", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("color", 640, 640)

        # Block until the camera delivers its first frame.
        wait_for_connection = False
        while not wait_for_connection:
            wait_for_connection, _ = self.camera.GetImage()
            time.sleep(1)

    def reset(self, seed: Optional[int] = None, options: Optional[Dict[str, Any]] = None):
        """Move the robot to a randomized start pose above the hole.

        Fix: the original ignored ``seed``; we now follow the gymnasium
        reset contract by seeding ``self.np_random`` and drawing the
        start-pose randomization from it, making episodes reproducible.

        :returns: ``(observation, info)`` per the gymnasium API.
        """
        super().reset(seed=seed)
        self.rtde_c.servoStop()
        self.init_pose = self.target_pos.copy()
        self.init_pose[:2] += self.np_random.uniform(-self.initial_error, self.initial_error, 2)
        self.init_pose[2] += 0.015  # start 15 mm above the hole pose
        if self.terminated:
            # Peg ended the last episode inside the hole: lift 2 mm, then
            # slide sideways to free it before travelling to the start pose.
            pose = self.rtde_r.getActualTCPPose()
            pose[2] += 0.002
            self.rtde_c.moveL(pose, self.velocity, self.acceleration)
            pose[1] += self.np_random.uniform(0, 0.03)
            self.rtde_c.moveL(pose, self.velocity, self.acceleration)

        self.rtde_c.moveL(self.init_pose, self.velocity, self.acceleration)
        self.current_pose = self.rtde_r.getActualTCPPose()
        self.terminated = False
        obs = self._get_obs()
        info = self._get_info()
        return obs, info

    def step(self, action: np.ndarray):
        """Execute one policy action and return the gymnasium 5-tuple."""
        action = np.clip(action, self.action_space.low, self.action_space.high)
        self._miniServoL(action * self.action_range, self.ministep_num)
        # Digital input 0 goes high on insertion success; cast to bool so the
        # step return type matches the gymnasium contract.
        self.terminated = bool(self.rtde_r.getDigitalInState(0))
        observation = self._get_obs()
        reward = 1 if self.terminated else 0  # sparse success reward
        info = self._get_info()

        return observation, reward, self.terminated, False, info

    def _get_obs(self):
        """Assemble the observation dict for the active mode."""
        current_pose = self.rtde_r.getActualTCPPose()
        # TCP offset from the episode start pose (list - ndarray broadcasts).
        displacement = current_pose[:3] - self.init_pose[:3]
        displacement = np.array(displacement, dtype=np.float32)
        _, color_image = self.camera.GetImage()
        color_image = color_image[272:400, 272:400]  # central 128x128 crop
        if self.render:
            cv2.imshow("color", color_image)
            cv2.waitKey(1)
        if self.collect_data_mode:
            _, depth_image = self.camera.GetDepthImage()
            depth_image = depth_image[272:400, 272:400]
            return {
                "state": {
                    "displacement": displacement,
                },
                "images": {
                    "color": color_image,
                    "depth": depth_image,
                },
            }
        else:
            self.mask_maker.process_image(color_image)
            _, area = self.mask_maker.get_mask()
            mask_image = self.mask_maker.get_color_mask()
            # Mask presumably comes back as floats in [0, 1] -- TODO confirm;
            # scale to 8-bit for the observation space.
            img_scaled = np.clip(mask_image * 255, 0, 255)
            img_uint8 = img_scaled.astype(np.uint8)
            if self.render:
                # Fix: the mask preview was previously shown even with
                # render=False (and without waitKey it could not display).
                cv2.imshow("mask", img_uint8)
            area = np.array([area], dtype=np.float32)
            return {
                "state": {
                    "displacement": displacement,
                    "area": area,
                },
                "images": {
                    "wrist": img_uint8,
                },
            }

    def _miniServoL(self, action, step=4):
        """Execute one action as ``step`` servoL sub-steps with force compliance.

        :param action: total Cartesian displacement ``[dx, dy, dz]`` in metres.
        :param step: number of sub-steps to split the motion into (default 4).
        """
        miniaction = action / step
        for _ in range(step):
            force = self.ft.GetForce()
            # PID compliance correction keeping contact force near goalForce.
            delta = self.action_controller._calPIDForce(force)
            self.current_pose[:3] += miniaction + delta[:3]
            t_start = self.rtde_c.initPeriod()
            self.rtde_c.servoL(self.current_pose, self.velocity, self.acceleration,
                               self.dt, self.lookahead_time, self.gain)
            self.rtde_c.waitPeriod(t_start)

    def _get_info(self):
        """No auxiliary diagnostics yet; returns an empty dict."""
        return {}

    def close(self):
        """Release the sensor, robot, and camera connections."""
        self.ft.DisConnect()
        self.rtde_c.disconnect()
        self.rtde_r.disconnect()
        self.camera.Close()