from collections import OrderedDict

import numpy as np
import sapien.core as sapien
from sapien.core import Pose
from transforms3d.euler import euler2quat, quat2euler

from mani_skill2.utils.registration import register_env
from mani_skill2.utils.sapien_utils import vectorize_pose

from .base_env import StationaryManipulationEnv
from mani_skill2.utils.sapien_utils import  get_entity_by_name

@register_env("CocubeGripper-v0", max_episode_steps=200)
class CocubeGripperEnv(StationaryManipulationEnv):
    """Pick-and-place task: move a small cube onto a goal site with a gripper.

    An episode succeeds when the cube's center lies within ``goal_thresh``
    of the goal position. Both the cube spawn and the goal are currently
    hard-coded (the sampling loops are kept only so the episode RNG draws
    the same sequence as before — see the NOTE(review) comments below).
    """

    # Success distance threshold (m); also used as the goal-site radius.
    goal_thresh = 0.01
    # Minimum sampled object-to-goal separation (m) during goal sampling.
    min_goal_dist = 0.05

    def __init__(self, *args, obj_init_rot_z=True, **kwargs):
        # Whether to randomize the cube's initial yaw.
        self.obj_init_rot_z = obj_init_rot_z
        self.cube_half_size = np.array([0.0125] * 3, np.float32)
        # Latched True once the gripper opens past 0.05 — see set_robot_pos.
        self.flag = False
        super().__init__(*args, **kwargs)

    def _load_actors(self):
        """Create the ground plane, the cube, and the goal visualization site."""
        self._add_ground(render=self.bg_name is None)
        self.obj = self._build_cube(self.cube_half_size)
        self.goal_site = self._build_sphere_site(self.goal_thresh)

    def _initialize_actors(self):
        """Place the cube at the start of an episode.

        NOTE(review): the rejection-sampling loop is kept so the episode RNG
        consumes exactly the same draws as before, but the sampled ``xyz`` is
        overwritten by a hard-coded spawn position — only the sampled yaw
        quaternion ``q`` is actually used.
        """
        # Workspace-bound penalty latch, read by compute_dense_reward.
        self.out_of_bound = False

        q = [1, 0, 0, 0]
        for _ in range(100):
            xy = self._episode_rng.uniform(-0.08, 0.08, [2])
            xyz = np.hstack([xy, 0.002])
            if self.obj_init_rot_z:
                ori = self._episode_rng.uniform(0, 2 * np.pi)
                q = euler2quat(0, 0, ori)
            # Accept samples far enough from (-0.1, 0).
            if np.linalg.norm(xy - [-0.1, 0]) > 0.05:
                break
        # Hard-coded spawn overrides the sample above — TODO confirm intended.
        xyz = np.array([-0.038, -0.080, 0.002])
        self.obj.set_pose(Pose(xyz, q))

    def _initialize_agent(self):
        """Defer to the base environment's agent initialization."""
        super()._initialize_agent()

    def _initialize_task(self, max_trials=100, verbose=False):
        """Sample (then override) the goal position and place the goal site.

        NOTE(review): as in ``_initialize_actors``, the sampled goal is
        discarded in favor of a hard-coded position; the loop is kept so the
        episode RNG state stays identical.
        """
        obj_pos = self.obj.pose.p

        # Sample a goal position far enough from the object.
        for i in range(max_trials):
            goal_xy = self._episode_rng.uniform(-0.08, 0.08, [2])
            goal_z = self._episode_rng.uniform(0, 0.003)
            goal_pos = np.hstack([goal_xy, goal_z])
            if np.linalg.norm(goal_pos - obj_pos) > self.min_goal_dist:
                if verbose:
                    print(f"Found a valid goal at {i}-th trial")
                break
        # Hard-coded goal overrides the sample above — TODO confirm intended.
        self.goal_pos = np.array([0.001, -0.385, 0.002])
        self.goal_site.set_pose(Pose(self.goal_pos))

    def get_tcp_pos(self):
        """Return the tool-center point: midpoint of the two gripper links."""
        links = self.agent.robot.get_links()
        gripper_l = get_entity_by_name(links, "gripper_l")
        gripper_r = get_entity_by_name(links, "gripper_r")
        return (gripper_l.pose.p + gripper_r.pose.p) / 2

    def set_robot_pos(self, pos, rot):
        """Teleport the robot base to ``(pos[0], pos[1])`` with yaw ``rot``.

        Once the gripper has opened beyond 0.05 (latched via ``self.flag``),
        the cube is dragged along, 0.075 m ahead of the base along the yaw
        direction, keeping its current height and orientation.
        """
        set_pos = np.array([pos[0], pos[1], self.agent.robot.get_pose().p[2]])
        orin_rot = list(quat2euler(self.agent.robot.get_pose().q))
        orin_rot[-1] = rot
        # Fix: the original issued two set_pose calls with the identical
        # pose; a single call suffices.
        self.agent.robot.set_pose(
            Pose(set_pos, euler2quat(orin_rot[0], orin_rot[1], orin_rot[2]))
        )

        # Assumes the last qpos entry is the gripper opening — TODO confirm.
        if self.agent.robot.get_qpos()[-1] > 0.05:
            self.flag = True
        if self.flag:
            obj_pos = np.array(
                [
                    pos[0] + 0.075 * np.cos(rot),
                    pos[1] + 0.075 * np.sin(rot),
                    self.obj.pose.p[2],
                ]
            )
            self.obj.set_pose(Pose(obj_pos, self.obj.pose.q))

    def _get_obs_extra(self) -> OrderedDict:
        """Extra observations: TCP/goal positions, plus relative vectors to
        the object and goal in state-based observation modes."""
        # Hoisted: get_tcp_pos performs two link lookups per call.
        tcp_pos = self.get_tcp_pos()
        obs = OrderedDict(
            tcp_pos=tcp_pos,
            goal_pos=self.goal_pos,
        )
        if self._obs_mode in ["state", "state_dict"]:
            obj_pos = self.obj.pose.p
            obs.update(
                tcp_to_goal_pos=self.goal_pos - tcp_pos,
                obj_pos=obj_pos,
                tcp_to_obj_pos=obj_pos - tcp_pos,
                obj_to_goal_pos=self.goal_pos - obj_pos,
            )
        return obs

    def check_obj_placed(self):
        """True when the cube center is within ``goal_thresh`` of the goal."""
        return np.linalg.norm(self.goal_pos - self.obj.pose.p) <= self.goal_thresh

    def check_robot_static(self, thresh=0.2):
        """True when all non-gripper joint speeds are below ``thresh``.

        Assumes the last two DoF are the gripper.
        """
        qvel = self.agent.robot.get_qvel()[:-2]
        return np.max(np.abs(qvel)) <= thresh

    def evaluate(self, **kwargs):
        """Success iff the object is placed; robot staticness is not required.

        (The original also called check_robot_static and discarded the
        result — that dead call is removed.)
        """
        is_obj_placed = self.check_obj_placed()
        return dict(
            is_obj_placed=is_obj_placed,
            success=is_obj_placed,
        )

    def compute_dense_reward(self, info, **kwargs):
        """Shaped reward for reaching, "grasping", and placing the cube.

        +5 flat on success; otherwise a reaching term in [0, 1], a proximity
        bonus (+1), a heuristic grasp bonus (+1), and a placement term in
        [0, 2] while grasped. Once the TCP leaves the 0.15 m workspace box,
        the reward is latched to -5 for the rest of the episode.
        """
        reward = 0.0

        if info["success"]:
            reward += 5
            return reward

        # Hoisted: get_tcp_pos performs two link lookups per call.
        tcp_pos = self.get_tcp_pos()
        tcp_to_obj_dist = np.linalg.norm(self.obj.pose.p - tcp_pos)

        # Reaching: approaches 1 as the TCP nears the cube.
        reward += 1 - np.tanh(5 * tcp_to_obj_dist)

        # Proximity bonus.
        reward += 1 if tcp_to_obj_dist < 0.022 else 0.0

        # Heuristic grasp: near the cube with the gripper opened past 0.05
        # (assumes the last qpos entry is the gripper — TODO confirm).
        is_grasped = tcp_to_obj_dist < 0.022 and self.agent.robot.get_qpos()[-1] > 0.05
        reward += 1 if is_grasped else 0.0

        if is_grasped:
            # (A goal-direction alignment term was computed here in the
            # original but never added to the reward; the dead computation
            # is removed.)
            obj_to_goal_dist = np.linalg.norm(self.goal_pos - self.obj.pose.p)
            place_reward = (1 - np.tanh(20 * obj_to_goal_dist)) * 2
            reward += place_reward

        # Latch the out-of-bounds penalty once the TCP leaves the 0.15 m box.
        if abs(tcp_pos[0]) > 0.15 or abs(tcp_pos[1]) > 0.15:
            self.out_of_bound = True
        if self.out_of_bound:
            reward = -5

        return reward

    def compute_normalized_dense_reward(self, **kwargs):
        """Dense reward scaled into roughly [-1, 1] (max dense reward is 5)."""
        return self.compute_dense_reward(**kwargs) / 5.0

    def render_human(self):
        """Render interactively with the goal site temporarily visible."""
        self.goal_site.unhide_visual()
        ret = super().render_human()
        self.goal_site.hide_visual()
        return ret

    def render_rgb_array(self):
        """Render an RGB array with the goal site temporarily visible."""
        self.goal_site.unhide_visual()
        ret = super().render_rgb_array()
        self.goal_site.hide_visual()
        return ret

    def get_state(self) -> np.ndarray:
        """Append the 3-D goal position to the base environment state."""
        state = super().get_state()
        return np.hstack([state, self.goal_pos])

    def set_state(self, state):
        """Restore state; the last 3 entries are the goal position."""
        self.goal_pos = state[-3:]
        super().set_state(state[:-3])


