"""Pathology environment."""

import os

CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
import cv2
import numpy as np
import sapien
from gymnasium import spaces
from PIL import Image, ImageColor
from sapien import Pose
from sapien.utils.viewer import Viewer
from scipy.spatial.transform import Rotation as R

from pathology.controllers.pybullet_planner import PybulletPlanner
from pathology.controllers.slide_planner import SlidePlanner
from pathology.envs.base_env import SapienEnv
from pathology.envs.camera import Camera


class PathologyEnv(SapienEnv):
    """SAPIEN environment for a pathology slide-handling task.

    A UR5 arm fitted with a Robotiq gripper, a wrist camera and a slide
    holder manipulates a tissue piece ("cut") around a processing station
    ("kyd-tk"). The arm joints are position-controlled via internal drives.
    """

    def __init__(self, control_freq=10, timestep=0.01, headless=True):
        """Build the scene and set up planners, the hand camera and RL spaces.

        Args:
            control_freq: Number of physics steps executed per ``step()`` call.
            timestep: Physics timestep in seconds.
            headless: If True, no viewer window is ever created.
        """
        super().__init__(
            control_freq=control_freq, timestep=timestep, headless=headless
        )
        self.sim_time = 0.0  # accumulated simulated time in seconds

        self.robot = self.get_articulation("ur5")
        # Cache the link list once; named links below are picked out by the
        # fixed index order defined in the URDF.
        self.robot_links = self.robot.get_links()
        self.tool0 = self.robot_links[11]
        self.robotiq = self.robot_links[12]
        self.camera = self.robot_links[13]
        self.slide_top = self.robot_links[14]
        self.dof = self.robot.dof
        self.active_joints = self.robot.get_active_joints()
        self.cut = self.get_articulation("cut")
        self.kyd_tk = self.get_actor("kyd-tk")

        # The arm is controlled by the internal position drive.
        for joint in self.active_joints:
            joint.set_drive_property(stiffness=90.0, damping=18)

        # Planners get the (assumed constant) gripper/camera offsets relative
        # to the tool0 frame, measured at the initial configuration.
        self.slide_planner = SlidePlanner(
            scene=self._scene,
            urdf=self.robot_urdf,
            pose_robotiq_to_tool0=self.tool0.get_pose().inv() * self.robotiq.get_pose(),
            pose_camera_to_tool0=self.tool0.get_pose().inv() * self.camera.get_pose(),
        )
        self.pybullet_planner = PybulletPlanner(
            scene=self._scene,
            urdf=os.path.join(self.assets_dir, "ur5/ur5_robotiq_slide_pybullet.urdf"),
        )

        # Wrist-mounted camera (848x480, 60 deg vertical FOV).
        self.hand_camera = Camera(
            scene=self._scene,
            name="hand_camera",
            width=848,
            height=480,
            fovy=np.deg2rad(60),
            near=0.05,
            far=100,
        )
        self.hand_camera.update_camera_pose(
            sapien.Pose(p=np.array([-2.0, 0.0, 1.5]), q=np.array([1.0, 0.0, 0.0, 0.0]))
        )

        # NOTE(review): _get_obs returns a dict, which does not match this
        # flat Box observation space -- confirm which format is intended.
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=[self.dof * 2 + 13], dtype=np.float32
        )
        self.action_space = spaces.Box(
            low=-1.0, high=1.0, shape=[self.dof], dtype=np.float32
        )

    # ---------------------------------------------------------------------------- #
    # Simulation world
    # ---------------------------------------------------------------------------- #
    def _build_world(self):
        """Populate the scene: ground, KYD-TK station, UR5 robot, tissue piece."""
        self.assets_dir = os.path.join(CURRENT_DIR, "../assets")
        physical_material = self._scene.create_physical_material(1.0, 1.0, 0.0)
        self._scene.default_physical_material = physical_material
        self._scene.add_ground(0.0, render_material=[0.5, 0.5, 0.8])

        # KYD-TK station: kinematic actor with separate collision/visual poses
        # (the visual mesh needs a rotation to match the collision frame).
        builder = self._scene.create_actor_builder()
        builder.add_nonconvex_collision_from_file(
            filename=os.path.join(self.assets_dir, "pathology/visual/kyd-tk.glb"),
            pose=Pose(
                p=[0, 0, 0], q=R.from_euler("xyz", [0, 0, 0]).as_quat()[[3, 0, 1, 2]]
            ),
        )
        builder.add_visual_from_file(
            filename=os.path.join(self.assets_dir, "pathology/visual/kyd-tk.glb"),
            pose=sapien.Pose(
                p=[0, 0, 0],
                q=R.from_euler("xyz", [1.5708, 0, -1.5708]).as_quat()[[3, 0, 1, 2]],
            ),
        )
        kyd_tk = builder.build_kinematic(name="kyd-tk")
        kyd_tk.set_pose(sapien.Pose(p=[0.66, 0, 0.1]))

        # UR5 robot with gripper/slide attachments, fixed to the world.
        loader = self._scene.create_urdf_loader()
        loader.fix_root_link = True
        self.robot_urdf = os.path.join(self.assets_dir, "ur5/ur5_robotiq_slide.urdf")
        robot = loader.load(self.robot_urdf)
        robot.set_name("ur5")
        robot.set_root_pose(Pose(p=[0.0, 0.0, 0.0]))
        self.init_qpos = np.array([0, -1.5708, 0.9, -1.5708, -1.5708, 0])
        robot.set_qpos(self.init_qpos)
        # Gravity off so the position drives alone hold the arm in place.
        for link in robot.links:
            link.disable_gravity = True

        # Tissue piece ("cut"), loaded from its own URDF.
        loader = self._scene.create_urdf_loader()
        loader.fix_root_link = True
        self.cut_urdf = os.path.join(self.assets_dir, "pathology/cut.urdf")
        cut = loader.load(self.cut_urdf)
        cut.set_name("cut")
        cut.set_root_pose(Pose(p=[0.6, 0.2, 0.2]))

    # ---------------------------------------------------------------------------- #
    # RL
    # ---------------------------------------------------------------------------- #
    def step(self, action):
        """Advance the simulation by one control step.

        Args:
            action: Joint position targets for the six arm joints.

        Returns:
            Tuple ``(obs, reward, done, info)`` in the classic 4-tuple Gym
            format; ``done`` is currently always False.
        """
        # Apply the action as drive targets for the six arm joints.
        for idx in range(6):
            self.active_joints[idx].set_drive_target(action[idx])
        # Run `control_freq` physics substeps per control step.
        for _ in range(self.control_freq):
            self._scene.step()
            self.sim_time += self._scene.get_timestep()

        # Refresh the hand-camera results from the current wrist pose.
        self.camera_results = self.hand_camera.get_camera_results(
            pose=self.camera.get_pose()
        )

        obs = self._get_obs()
        reward = self._get_reward()
        # Placeholder termination: no success condition is implemented yet,
        # so the bonus branch below is currently unreachable.
        done = False
        if done:
            reward += 100.0

        return obs, reward, done, {}

    def reset(self):
        """Reset the arm to its initial configuration and return the first observation."""
        self.robot.set_qpos(self.init_qpos)

        self.sim_time = 0.0

        # One physics step so poses/camera reflect the reset state.
        self._scene.step()
        self.sim_time += self._scene.get_timestep()
        self.camera_results = self.hand_camera.get_camera_results(
            pose=self.camera.get_pose()
        )

        return self._get_obs()

    def _get_obs(self):
        """Collect the current observation as a dict of joint state and poses."""
        qpos = self.robot.get_qpos()
        qvel = self.robot.get_qvel()
        kyd_tk_pose = self.kyd_tk.get_pose()
        cut_pose = self.cut.get_pose()
        ee_pose = self.tool0.get_pose()
        robotiq_pose = self.robotiq.get_pose()
        camera_pose = self.camera.get_pose()
        slide_top = self.slide_top.get_pose()
        return {
            "qpos": qpos,
            "qvel": qvel,
            "kyd_tk_pose": kyd_tk_pose,
            "cut_pose": cut_pose,
            "ee_pose": ee_pose,
            "robotiq_pose": robotiq_pose,
            "camera_pose": camera_pose,
            "slide_top": slide_top,
            # Keypoint feature from the most recent camera render
            # (set in step()/reset()).
            "cut_feature": self.camera_results["keypoint"]["key_pose"],
        }

    def _get_reward(self):
        """Reward placeholder; no task reward is defined yet."""
        return 0.0

    # ---------------------------------------------------------------------------- #
    # Visualization
    # ---------------------------------------------------------------------------- #
    def _setup_lighting(self):
        """Add ambient plus two directional lights to the scene."""
        self._scene.set_ambient_light([0.4, 0.4, 0.4])
        self._scene.add_directional_light([1, -1, -1], [0.3, 0.3, 0.3])
        self._scene.add_directional_light([0, 0, -1], [1, 1, 1])

    def _setup_viewer(self):
        """Create the interactive viewer looking down at the workspace."""
        self._setup_lighting()
        self.viewer = Viewer()
        self.viewer.set_scene(self._scene)
        self.viewer.set_camera_xyz(x=1.5, y=0.0, z=2.0)
        self.viewer.set_camera_rpy(y=3.14, p=-0.5, r=0)

    def render(self, extra=None):
        """Render one viewer frame; no-op when headless.

        Args:
            extra: Optional collection of overlay names understood by
                ``add_extra``/``remove_extra`` (e.g. "slide_boundingbox",
                "camera").
        """
        if self.headless:
            return
        if self.viewer is None:
            self._setup_viewer()

        self.add_extra(extra)

        self._scene.update_render()
        if not self.viewer.closed:
            self.viewer.render()

        self.remove_extra(extra)

    def add_extra(self, extra):
        """Add transient debug overlays (bounding box, camera windows) before a render."""
        if extra is None:
            return

        if "slide_boundingbox" in extra:
            self.slide_box = self.viewer.add_bounding_box(
                pose=self.robot_links[-1].get_pose(),
                half_size=[0.1, 0.02, 0.002],
                color=[1, 0, 0],
            )
        if "camera" in extra:
            self.hand_camera.disp_keypoint(
                self.camera_results["keypoint"]["img"], image_name="Keypoint"
            )

    def remove_extra(self, extra):
        """Remove the overlays added by ``add_extra`` after a render."""
        if extra is None:
            return

        if "slide_boundingbox" in extra:
            self.viewer.remove_bounding_box(self.slide_box)
        if "camera" in extra:
            # Camera display windows persist; nothing to tear down.
            pass

def main():
    """Interactive demo: oscillate the base joint while displaying camera views."""
    headless = False
    env = PathologyEnv(headless=headless)
    env.reset()

    for _ in range(10):
        for step_idx in range(10000):
            # Sinusoidal offset on the base joint around the home pose.
            offset = np.array([0.5 * np.sin(env.sim_time * 0.1), 0, 0, 0, 0, 0])
            obs, reward, done, info = env.step(env.init_qpos + offset)

            results = env.hand_camera.get_camera_results(
                types=["rgb", "depth", "mesh_segmentation"],
                pose=env.camera.get_pose(),
            )
            if not headless:
                env.render()
                env.hand_camera.disp_rgb(results["rgb"])
                env.hand_camera.disp_segmentation(
                    results["mesh_segmentation"], image_name="Mesh Segmentation"
                )
                env.hand_camera.disp_depth(results["depth"], image_name="Depth")

            if done:
                print(f"Done at step {step_idx}")
                break
        env.reset()

    env.close()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
