from os import path as osp
from threading import Event, Lock, Thread, Timer
from typing import Dict, List, Tuple

import numpy as np
import pybullet as p

from pybullet_utils.bullet_client import BulletClient

from gymnasium import Env, spaces
from gymnasium.core import Any, RenderFrame, SupportsFloat
from gymnasium.envs.registration import EnvSpec


# Default environment configuration; any key may be overridden per instance
# via **kwargs of ThorXEnvPyBullet.__init__.
CONFIG_DEFAULT = {
    # 'reward_pick': 0.5,  # reward for successful object pick
    # 'reward_place': 0.5,  # reward for successful object place (to the target location)
    'ratio_reward_pick_place': 0.5,  # pick vs place reward ratio (to sum up both to 1)
    'ratio_reward_metric_collision': 0.8,  # metric vs collision reward ratio (to sum up to -1)
    'ratio_reward_object_target': 0.5,  # object vs target reward ratio (metric reward)
    'threshold_reward_done': -0.015,  # task done condition (for metric reward in [0..-1])
    'threshold_reward_fail': -0.995,  # task fail condition (for metric reward in [0..-1])
    'trajectory_max_steps': 10,  # at max step trajectory will be truncated
    'object_min_distance': 0.15,  # object respawn min distance from world center (meters)
    'object_max_distance': 0.4,  # object respawn max distance from world center (meters)
}
# Standard gravity magnitude (m/s^2), applied along -Z in the simulation
CONSTANT_GRAVITY = 9.81


class ThorXEnvPyBullet(Env):
    """Pick-and-place environment for the ThorX arm, simulated in PyBullet.

    The agent drives six arm joints plus a two-finger gripper with a
    7-component action in [0, 1]; the single gripper component is mirrored
    onto both finger joints.  The reward mixes a distance-based metric term
    (gripper-to-cube and cube-to-target) with a collision penalty between
    the gripper fingers and the platform.

    Physics stepping runs continuously in a daemon thread (``update``) and
    ``step`` is rate-limited to ``fps`` by a second daemon thread
    (``allow_step``).  Every PyBullet call is serialized through
    ``self.lock``.
    """

    metadata = {'render_modes': ['human', 'rgb_array']}

    def __init__(
        self,
        gui=False,
        depth=False,
        mask=False,
        absolute=True,
        delta=0.1,
        metric=True,
        visual=False,
        fps=10.0,
        manual=False,
        camera=(640, 480, 45),
        **kwargs,
    ) -> None:
        """Connect to PyBullet, load the scene and start worker threads.

        Args:
            gui: connect in GUI mode instead of headless DIRECT mode.
            depth: enable the depth-buffer preview window (GUI only).
            mask: enable the segmentation-mask preview window (GUI only).
            absolute: treat actions as absolute joint targets; otherwise
                actions are deltas scaled by ``delta``.
            delta: step size used for relative (non-absolute) actions.
            metric: stored as ``self.reward_metric``.  NOTE(review): the
                flag is never read afterwards — confirm intent.
            visual: observe two stacked RGB images instead of coordinates.
            fps: maximum rate at which ``step`` may proceed.
            manual: when True, ``step`` does not apply the action
                (external/manual control, e.g. debugging).
            camera: (width, height, vertical field-of-view in degrees).
            **kwargs: overrides for entries of ``CONFIG_DEFAULT``.
        """
        super().__init__()

        # Task-specific fields
        self.actions_absolute = absolute
        self.delta = delta
        self.reward_metric = metric
        self.fps = fps
        self.manual = manual
        # Default camera parameters
        self.camera_view = camera  # (width, height, field-of-view)
        self.camera_aspect = self.camera_view[0] * 1.0 / self.camera_view[1]
        self.camera_clip = (0.02, 10)  # (near, far) - clip range
        # Default config with per-instance overrides
        self.config = CONFIG_DEFAULT.copy()
        self.config.update(kwargs)
        print(f"CONFIG: {self.config}")

        # Config
        self.distance_object_min = self.config['object_min_distance']
        self.distance_object_max = self.config['object_max_distance']
        self.simulation = True  # TODO: gui
        self.terminated = False
        self.steps_max = self.config['trajectory_max_steps']
        self.ratio_reward_metric_collision = self.config[
            'ratio_reward_metric_collision'
        ]
        self.ratio_reward_object_target = self.config[
            'ratio_reward_object_target'
        ]
        # Done/fail thresholds are scaled by the metric/collision ratio so
        # they stay comparable with the scaled metric reward.
        self.threshold_reward_done = self.config['threshold_reward_done']
        print(
            f"DEBUG: reward done threshold (config) = {self.threshold_reward_done}"
        )
        self.threshold_reward_done *= self.ratio_reward_metric_collision
        print(
            f"DEBUG: reward done threshold (scaled) = {self.threshold_reward_done}"
        )
        self.threshold_reward_fail = self.config['threshold_reward_fail']
        print(
            f"DEBUG: reward fail threshold (config) = {self.threshold_reward_fail}"
        )
        self.threshold_reward_fail *= self.ratio_reward_metric_collision
        print(
            f"DEBUG: reward fail threshold (scaled) = {self.threshold_reward_fail}"
        )
        self.steps = 0

        # Gym interface
        self.action_space = spaces.Box(
            low=0, high=1, shape=(7,), dtype=np.float32
        )

        if visual:
            # FIX: was (2, 224, 244, 3); the "2 RGB images" comment and the
            # conventional 224x224 input size indicate 244 was a typo.
            self.observation_space = spaces.Box(
                low=0, high=255, shape=(2, 224, 224, 3), dtype=np.uint8
            )  # 2 RGB images
        else:
            self.observation_space = spaces.Box(
                low=-1e5, high=1e5, shape=(11, 7), dtype=np.float32
            )
        # Zero-filled observation used as a fallback on client errors
        self.observation_zero = np.zeros(
            self.observation_space.shape, self.observation_space.dtype
        )
        print(f"DEBUG: reward range (original) = {self.reward_range}")
        self.reward_range = (-1, 1)
        print(f"DEBUG: self spec = {self.spec}")
        self.spec = EnvSpec(
            id='ThorX-v0',
            max_episode_steps=self.steps_max,
            reward_threshold=self.threshold_reward_done,
        )
        print(f"DEBUG: self metadata = {self.metadata}")

        # Simulation: PyBullet
        self._client = BulletClient(p.GUI if gui else p.DIRECT)

        # Configure simulation
        # TODO: move to bullet_client
        self._client.setPhysicsEngineParameter(enableFileCaching=0)
        # Tuning options (currently disabled): numSolverIterations=10,
        # numSubSteps=4, setTimeStep(1.0 / 500.0)
        self._client.configureDebugVisualizer(self._client.COV_ENABLE_GUI, gui)
        self._client.configureDebugVisualizer(
            self._client.COV_ENABLE_RGB_BUFFER_PREVIEW, gui
        )
        self._client.configureDebugVisualizer(
            self._client.COV_ENABLE_DEPTH_BUFFER_PREVIEW, gui and depth
        )
        self._client.configureDebugVisualizer(
            self._client.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, gui and mask
        )
        self._client.setGravity(0, 0, -CONSTANT_GRAVITY)
        # NOTE: "TRANSPARANCY" is PyBullet's own spelling of this constant.
        self.flags = (
            self._client.URDF_USE_MATERIAL_COLORS_FROM_MTL
            | self._client.URDF_USE_MATERIAL_TRANSPARANCY_FROM_MTL
        )
        # Simulation default camera view
        # NOTE(review): attribute names keep the historical "martix" spelling
        # for backward compatibility with external users.
        self.martix_view = self._client.computeViewMatrixFromYawPitchRoll(
            (0.0, 0.0, 0.0), 2.5, 45, -45, 0, 2
        )  # TODO: move the parameters to config
        self.martix_projection = self._client.computeProjectionMatrixFOV(
            self.camera_view[-1], self.camera_aspect, *self.camera_clip
        )

        # Load models (SDF world shipped alongside this package)
        print(f"DEBUG: __file__ = {__file__}")
        self.models = self._client.loadSDF(
            osp.join(
                osp.dirname(__file__), "..", "models", "thorx", "model.sdf"
            )
        )
        self.id_thorx = self.models[0]
        self.id_cube = self.models[1]
        self.id_target = self.models[2]
        self.id_platform = self.models[3]
        self.id_frame = self.models[4]
        self.id_camera = self.models[5]
        self.id_laba = self.models[-1]

        # Reset dynamic objects position and orientation
        self._client.resetBasePositionAndOrientation(
            self.id_thorx, (0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)
        )
        self._client.resetBasePositionAndOrientation(
            self.id_target, (0.5, 0.0, 1e-5), (0.0, 0.0, 0.0, 1.0)
        )
        # Pin static objects in the scene with fixed constraints
        self.joint_thorx_fixed = self._fix_in_place(self.id_thorx)
        self.joint_platform_fixed = self._fix_in_place(
            self.id_platform, pivot=(0, 0, -1e-5)
        )
        self.joint_frame_fixed = self._fix_in_place(self.id_frame)
        self.joint_camera_fixed = self._fix_in_place(self.id_camera)
        self.joint_laba_fixed = self._fix_in_place(self.id_laba)
        self._respawn_object()

        # Map joint names to their PyBullet getJointInfo tuples.  Keys are
        # matched in this order so e.g. 'left' wins over a digit match.
        self._info_thorx = {}
        for i in range(self._client.getNumJoints(self.id_thorx)):
            info = self._client.getJointInfo(self.id_thorx, i)
            joint_name = info[1].decode('utf-8')
            for key in ('left', 'right', '6', '5', '4', '3', '2', '1'):
                if key in joint_name:
                    self._info_thorx[key] = info
                    break
            else:
                print(f"WARNING: unmatched joint = {info}")
        print(self._info_thorx)

        # Controlled joints in a fixed order: arm joints 1-6, then fingers.
        joint_order = ['1', '2', '3', '4', '5', '6', 'left', 'right']
        # getJointInfo field 0 is the joint index
        self.joint_indices = np.array(
            [self._info_thorx[k][0] for k in joint_order]
        )
        # getJointInfo fields 8:10 are the (lower, upper) joint limits
        self.joint_scales = np.array(
            [self._info_thorx[k][8:10] for k in joint_order]
        )
        # Append the limit span (upper - lower) as a third column
        self.joint_scales = np.concatenate(
            (
                self.joint_scales,
                self.joint_scales[..., 1:2] - self.joint_scales[..., 0:1],
            ),
            axis=1,
        )
        print(f"DEBUG: joint indices = {self.joint_indices}")
        print(f"DEBUG: joint scales original =\n{self.joint_scales}")

        self.joint_finger_index = (
            6  # fingers start index (indices below are effectors)
        )
        # Coordinates state: base + 6 arm links + 2 fingers + target + cube,
        # each row = position xyz + orientation quaternion xyzw
        self.observation_coordinates = np.zeros(
            shape=(1 + 6 + 2 + 1 + 1, 7), dtype=np.float32
        )

        # Threading section
        self.lock = Lock()
        self.event_stop = Event()  # signals worker threads to exit
        self.event_step = Event()  # timer for step method (bind to fps)
        self.timer_step = Event()  # never set; used as interruptible sleep
        self.thread_step = Thread(
            target=self.allow_step, kwargs={'delay': 1 / self.fps}
        )
        self.thread_step.daemon = True
        self.thread_update = Thread(target=self.update)
        self.thread_update.daemon = True

        # Start threads
        self.thread_update.start()
        self.thread_step.start()

    def _fix_in_place(self, body_id, pivot=(0, 0, 0)):
        """Pin a body to the world with a JOINT_FIXED constraint.

        Args:
            body_id: PyBullet body unique id.
            pivot: child-frame position of the constraint.

        Returns:
            The constraint unique id.
        """
        return self._client.createConstraint(
            body_id,
            -1,
            -1,
            -1,
            self._client.JOINT_FIXED,
            [0, 0, 0],
            [0, 0, 0],
            list(pivot),
            [0, 0, 0, 1],
        )

    def act(self, action: Any) -> None:
        """Apply a 7-component action in [0, 1] to the robot joints.

        The last component is duplicated so both gripper fingers receive
        the same command.  Depending on ``self.actions_absolute`` the
        rescaled action is either an absolute joint target or a delta
        scaled by ``self.delta``.  On a client error the environment is
        flagged terminated.
        """
        assert len(action) == self.action_space.shape[0], (
            f"Expected {self.action_space.shape[0]}-component action,"
            f" got {action}!"
        )
        with self.lock:
            try:
                joint_states = self._client.getJointStates(
                    self.id_thorx, self.joint_indices
                )
                joint_positions = np.array(
                    tuple(map(lambda x: x[0], joint_states))
                )
                # Mirror the gripper command to both finger joints
                joint_actions = np.append(action, action[-1])
                # Map [0, 1] -> [-span/2, +span/2] (centered joint range)
                joint_actions[...] *= self.joint_scales[..., 2]
                joint_actions[...] -= self.joint_scales[..., 2] / 2
                if self.actions_absolute:
                    joint_positions[...] = joint_actions[...]
                else:
                    # Symmetrize fingers on the smaller magnitude of the two
                    fingers_average = min(
                        abs(joint_positions[self.joint_finger_index]),
                        abs(joint_positions[self.joint_finger_index + 1]),
                    )
                    # Reduce finger positions to their +/-1 sign.
                    # NOTE(review): divides by zero if a finger position is
                    # exactly 0 — confirm joint limits exclude 0.
                    joint_positions[self.joint_finger_index :] /= np.abs(
                        joint_positions[self.joint_finger_index :]
                    )
                    joint_positions[self.joint_finger_index :] *= (
                        fingers_average
                    )
                    joint_positions[...] += joint_actions[...] * self.delta
                if self.simulation:
                    self._client.setJointMotorControlArray(
                        bodyUniqueId=self.id_thorx,
                        jointIndices=self.joint_indices,
                        controlMode=self._client.POSITION_CONTROL,
                        targetPositions=joint_positions,
                    )
                else:
                    # Reset the joint state directly (ignores all dynamics,
                    # not recommended to use during simulation)
                    # TODO: DEBUG this branch
                    for index, position in zip(
                        self.joint_indices, joint_positions
                    ):
                        self._client.resetJointState(
                            self.id_thorx, index, position
                        )
            except self._client.error as ex:
                self.terminated = True
                print(ex)

    def allow_step(self, delay):
        """Worker loop: permit one ``step`` per ``delay`` seconds."""
        while not self.event_stop.is_set():
            self.event_step.set()
            # timer_step is never set, so wait() is an interruptible sleep
            self.timer_step.wait(delay)

    @staticmethod
    def _distance_to_reward(
        distance: float, a: float = 0.6, b: float = 0.5
    ) -> float:
        """Map a distance in [0, inf) to a reward in (-b, 0].

        Returns 0 at zero distance and approaches -b as distance grows.
        """
        return a * b / (abs(distance) + a) - b

    @staticmethod
    def _euclidean_distance(p1, p2):
        """Return the Euclidean (L2) distance between two points."""
        return np.linalg.norm(p1 - p2)

    def _get_frames(self):
        """Render one frame from the default camera.

        Returns the raw PyBullet tuple (width, height, rgb, depth, mask).
        """
        return self._client.getCameraImage(
            *self.camera_view[:2],  # width, height
            self.martix_view,
            self.martix_projection,
            shadow=1,
            lightDirection=[1, 1, 1],
        )

    def _get_observation_coordinates(self):
        """Refresh ``self.observation_coordinates`` (shape (11, 7)).

        Rows: 0 robot base, 1-6 arm links, 7-8 finger links, 9 target,
        10 cube; each row is position xyz + orientation quaternion xyzw.
        Callers must hold ``self.lock``.
        """
        link_states_thorx = [
            sum(
                self._client.getBasePositionAndOrientation(self.id_thorx),
                tuple(),
            )
        ]
        # Since link index matches joint index,
        # one can get link states via joint indices
        link_states_thorx += [
            ls[0] + ls[1]
            for ls in self._client.getLinkStates(
                self.id_thorx, self.joint_indices
            )
        ]
        link_states_object = [
            sum(
                self._client.getBasePositionAndOrientation(self.id_cube),
                tuple(),
            )
        ]
        link_states_target = [
            sum(
                self._client.getBasePositionAndOrientation(self.id_target),
                tuple(),
            )
        ]
        self.observation_coordinates = np.array(
            link_states_thorx + link_states_target + link_states_object,
            dtype=np.float32,
        )

    def _get_reward_metric(self) -> float:
        """Return the distance-based per-step reward (non-positive).

        Mixes cube-to-target and gripper-to-cube distance rewards weighted
        by ``ratio_reward_object_target``, divided by the step budget and
        scaled by ``ratio_reward_metric_collision`` * 2.
        """
        reward_target = (
            self._distance_to_reward(
                self._euclidean_distance(
                    self.observation_coordinates[10][:3],  # cube
                    self.observation_coordinates[9][:3],  # target (row 9)
                )
            )
            / self.steps_max
        )

        reward_object = (
            self._distance_to_reward(
                self._euclidean_distance(
                    self.observation_coordinates[10][:3],  # cube
                    (
                        self.observation_coordinates[8][:3]
                        + self.observation_coordinates[7][:3]
                    )
                    / 2,  # gripper fingers center of mass
                )
            )
            / self.steps_max
        )

        return (
            (
                self.ratio_reward_object_target * reward_object
                + (1 - self.ratio_reward_object_target) * reward_target
            )
            * self.ratio_reward_metric_collision
            * 2
        )

    def _get_reward_collision(self) -> float:
        """Return a penalty when either finger touches the platform."""
        points = self._client.getClosestPoints(
            self.id_thorx,
            self.id_platform,
            distance=0,
            linkIndexA=self.joint_indices[-1],
        )
        points += self._client.getClosestPoints(
            self.id_thorx,
            self.id_platform,
            distance=0,
            linkIndexA=self.joint_indices[-2],
        )
        if points:
            return (
                -1 * (1 - self.ratio_reward_metric_collision) / self.steps_max
            )
        return 0.0

    def _respawn_object(self):
        """Teleport the cube to a random position around the world center.

        Each xy coordinate is drawn from [min, max] distance with a random
        sign; z is fixed at 0.1.  Callers must hold ``self.lock`` and wrap
        the call in try/except for ``self._client.error``.
        """
        distance = (
            np.random.random((2,))
            * (self.distance_object_max - self.distance_object_min)
            + self.distance_object_min
        )  # distance in [min, max] per axis
        direction = np.power(
            np.ones((2,)) * -1, np.random.randint(0, 2, (2,))
        )  # random sign per axis: +/-1
        self._client.resetBasePositionAndOrientation(
            self.id_cube,
            (*(distance * direction), 0.1),
            (0.0, 0.0, 0.0, 1.0),
        )

    def observe(self):
        """Read an observation and compute the reward and episode flags.

        Returns:
            (observation, reward, terminated, truncated, info) — the
            Gymnasium ordering.  FIX: the previous version returned
            truncated before terminated, violating the Env.step contract.
        """
        observation, reward, terminated, truncated, info = (
            self.observation_zero,
            0.0,
            False,
            False,
            {},
        )
        with self.lock:
            # TODO (observation): self._client.getCameraImage() from two views
            # TODO (reward): fill self.joint_states
            # TODO (info): put self.joint_states into info
            try:
                # Observation
                self._get_observation_coordinates()
                observation = self.observation_coordinates

                # Reward components (both per-step scaled)
                reward_metric = self._get_reward_metric()
                reward_collision = self._get_reward_collision()

                # Truncated: episode step budget exhausted
                # TODO: replace with gymnasium.wrappers.time_limit.TimeLimit
                self.steps += 1
                if self.steps >= self.steps_max:
                    truncated = True
                    self.steps = 0

                # Terminated: task done or clearly failed
                if (
                    reward_metric > self.threshold_reward_done / self.steps_max
                ):  # suggested -0.01
                    terminated = True
                elif (
                    reward_metric < self.threshold_reward_fail / self.steps_max
                ):  # suggested -0.98
                    terminated = True

                reward = (reward_metric + reward_collision) / 2
                # New episode: randomize the object location
                if terminated or truncated:
                    self._respawn_object()
            except self._client.error as ex:
                self.terminated = True
                terminated = True
                print(ex)
        return observation, reward, terminated, truncated, info

    def step(
        self, action: Any, render: bool = False
    ) -> Tuple[Any, SupportsFloat, bool, bool, Dict[str, Any]]:
        """Apply ``action``, wait for the FPS gate, and observe.

        Simulation (desired): config joint motors for guaranteed reach
        target in one step (disable: mass, inertia, friction).
        Simulation (implemented): block step after action made and block
        until observation is ready (must be ready in reasonable FPS).
        Real (TODO): block agent until G-code status DONE.

        Returns:
            (observation, reward, terminated, truncated, info) in the
            Gymnasium order; with ``render=True`` the info dict also
            carries the camera frame under 'width'/'height'/'rgb'/
            'depth'/'mask'.
        """
        assert (
            isinstance(action, (spaces.Box, np.ndarray))
            and action.shape[0] == self.action_space.shape[0]
        ), (
            f"Expected {self.action_space.shape[0]}-component action,"
            f" got {type(action)}: {action}!"
        )
        if not self.manual:
            self.act(action)
        # Bind to FPS (block until the rate-limiter permits this step)
        self.event_step.wait()
        self.event_step.clear()
        observation, reward, terminated, truncated, info = self.observe()
        if render:
            info.update(
                dict(
                    zip(
                        ('width', 'height', 'rgb', 'depth', 'mask'),
                        self._get_frames(),
                    )
                )
            )
        return observation, reward, terminated, truncated, info

    def reset(
        self, *, seed: int | None = None, options: Dict[str, Any] | None = None
    ) -> Tuple[Any, Dict[str, Any]]:
        """Respawn the object and return the current observation.

        FIX: ``seed`` was previously ignored; it now seeds the Gymnasium
        base RNG and NumPy's global RNG (used by ``_respawn_object``).
        On a client error the zero observation is returned instead of None.
        """
        super().reset(seed=seed, options=options)
        if seed is not None:
            np.random.seed(seed)
        observation = self.observation_zero
        with self.lock:
            try:
                self._get_observation_coordinates()
                observation = self.observation_coordinates
                self._respawn_object()
            except self._client.error as ex:
                print(f"ERROR: {ex}")
        self.steps = 0
        return observation, {}

    def render(self, mode='human') -> RenderFrame | List[RenderFrame] | None:
        """Render the scene.

        'human': point the debug-visualizer camera at the scene (GUI mode).
        'rgb_array': return the RGB frame from the default camera, or None
        on a client error.
        """
        if mode == 'human':
            # TODO: camera position and orientation
            with self.lock:
                try:
                    self._client.resetDebugVisualizerCamera(
                        cameraDistance=1.5,
                        cameraYaw=45,
                        cameraPitch=-30,
                        cameraTargetPosition=[0, 0, 0],
                    )
                except self._client.error as ex:
                    self.terminated = True
                    print(ex)
        elif mode == 'rgb_array':
            # TODO: return RGB arrays from two cameras
            rgb = None
            with self.lock:
                try:
                    rgb = self._get_frames()[2]
                except self._client.error as ex:
                    self.terminated = True
                    print(ex)
            return rgb
        else:
            return super().render(mode=mode)

    def update(self):
        """Worker loop: advance the physics simulation until stopped.

        Threaded method — touch only state guarded by ``self.lock``.
        """
        while not self.event_stop.is_set():
            with self.lock:
                try:
                    self._client.stepSimulation()
                except self._client.error:
                    # Best-effort: ignore transient client errors
                    pass

    def __del__(self):
        """Stop worker threads and disconnect from the PyBullet server."""
        # Guard attribute access: __del__ may run after a failed __init__.
        event_stop = getattr(self, 'event_stop', None)
        if event_stop is not None:
            event_stop.set()
        client = getattr(self, '_client', None)
        if client is not None:
            try:
                client.disconnect()
            except client.error:
                pass


class ThorXAdapterRLlib(ThorXEnvPyBullet):
    """Thin adapter exposing ThorXEnvPyBullet through an RLlib-style
    constructor (a single ``config`` dict expanded into keyword args).

    ``step`` and ``reset`` are pure positional pass-throughs of the base
    environment's return values.  FIX: the return annotations previously
    used ``|`` between tuple elements (a one-element tuple of a union);
    they now spell the actual 5-tuple / 2-tuple types, and ``step`` no
    longer unpacks the base tuple into locally mislabeled names.
    """

    def __init__(self, config: dict) -> None:
        """Expand the RLlib config dict into base-class keyword arguments."""
        super().__init__(**config)

    def step(
        self, action: Any, render: bool = False
    ) -> Tuple[Any, SupportsFloat, bool, bool, Dict[str, Any]]:
        """Delegate to the base environment and forward its 5-tuple as-is."""
        return super().step(action, render=render)

    def reset(
        self, *, seed: int | None = None, options: Dict[str, Any] | None = None
    ) -> Tuple[Any, Dict[str, Any]]:
        """Delegate to the base environment and forward (observation, info)."""
        return super().reset(seed=seed, options=options)
