import numpy as np
from gym_multirotor import utils
from gym_multirotor.envs.mujoco.base_env import UAVBaseEnv


class X2Env(UAVBaseEnv):
    """
    Team of four quadrotors ("x2_0".."x2_3") in a plus(+) formation carrying a
    cable-suspended load. The task is to hover the formation (and the load) at
    the desired position.

    * Environment Name: QuadrotorPlusHoverEnv-v0

    qpos layout (67 entries, derived from ``initialize_robot``):
        drone i (i = 0..3): ``[11*i : 11*i+3]`` xyz, ``[11*i+3 : 11*i+7]`` body
        quaternion, ``[11*i+7 : 11*i+11]`` cable-joint quaternion;
        load: ``[44:47]`` xyz, ``[47:51]`` body quaternion, ``[51:67]`` four
        cable-joint quaternions.

    Observation layout (108 entries, built by ``_get_obs``):
        ``[0:15]``   position errors (drone 0..3, load), 3 each
        ``[15:60]``  rotation matrices (drone 0..3, load), 9 each
        ``[60:84]``  linear joint velocities (joint_0..3, joint_l0..l3), 3 each
        ``[84:108]`` angular joint velocities (same joint order), 3 each

    Args:
        xml_name (str): Name of the MJCF model file.
        frame_skip (int): Number of simulation frames per policy action.
        env_bounding_box (float): Half-extent of the allowed flight volume.
        randomize_reset (bool): Forwarded to the base class / reset logic.
    """

    # Body names of the four drones, in qpos/observation order.
    _DRONE_BODIES = ("x2_0", "x2_1", "x2_2", "x2_3")
    # Free joints of the drones followed by the load's cable joints,
    # in the order their velocities appear in the observation.
    _JOINT_NAMES = ("joint_0", "joint_1", "joint_2", "joint_3",
                    "joint_l0", "joint_l1", "joint_l2", "joint_l3")

    def __init__(self, xml_name="scene.xml", frame_skip=5, env_bounding_box=1.2, randomize_reset=False):
        super().__init__(xml_name=xml_name, frame_skip=frame_skip,
                         env_bounding_box=env_bounding_box, randomize_reset=randomize_reset)

    @property
    def hover_force(self):
        """
        Per-rotor force required to hover.

        Splits the weight ``mass * g`` evenly over four rotors.
        NOTE(review): presumably ``self.mass`` is a single drone's mass so each
        drone hovers on its own four rotors -- confirm against the base class.

        Returns:
            float: Hover force for each rotor.
        """
        return self.mass * self.gravity_mag * 0.25

    def get_motor_input(self, action):
        """
        Transform policy actions to motor inputs.

        Maps each action linearly into a band of width ``motor_range`` around
        the per-rotor hover force, so a zero action commands a hover.

        Args:
            action (numpy.ndarray): Actions from policy, one entry per rotor
                actuator, each within ``policy_range``.

        Returns:
            numpy.ndarray: Motor inputs, same shape as ``action``.
        """
        motor_range = 2.0
        scale = motor_range / (self.policy_range[1] - self.policy_range[0])
        return self.hover_force + action * scale

    def step(self, action):
        """
        Advance the environment by one control step.

        Args:
            action (numpy.ndarray): Action vector with each element expected
                in [-1., 1.] (one entry per rotor actuator).

        Returns:
            tuple[numpy.ndarray, float, bool, dict]: Tuple containing, in order:
                - ob (numpy.ndarray): Observation vector (108-dim).
                - reward (float): Scalar reward value.
                - done (bool): ``True`` if the episode is over else ``False``.
                - info (dict): Additional simulation information.
        """
        self._time += 1
        a = self.clip_action(
            action, a_min=self.policy_range[0], a_max=self.policy_range[1])
        action_mujoco = self.get_motor_input(a)

        pos_before = [self.get_body_com(name)[:3].copy()
                      for name in self._DRONE_BODIES]

        self.do_simulation(action_mujoco, self.frame_skip)
        self.sim.forward()

        pos_after = [self.get_body_com(name)[:3].copy()
                     for name in self._DRONE_BODIES]

        ob = self._get_obs()

        # Replace the drones' joint-qvel linear velocities with finite-difference
        # COM velocities over the control step.
        # BUG FIX: originally (before - after) / dt, i.e. the velocity sign was
        # inverted; and the values were written at ob[67:79], which does not
        # align with the layout produced by _get_obs() -- the drone linear
        # velocities occupy ob[60:72].
        for i, (p0, p1) in enumerate(zip(pos_before, pos_after)):
            ob[60 + 3 * i: 63 + 3 * i] = (p1 - p0) / self.dt

        self.current_robot_observation = ob.copy()

        reward, reward_info = self.get_reward(ob, a)

        info = {"reward_info": reward_info,
                "desired_goal": self.desired_position.copy(),
                "mujoco_qpos": self.mujoco_qpos,
                "mujoco_qvel": self.mujoco_qvel}

        done = self.is_done(ob)
        # NOTE(review): despite the "std" name this adds uniform (not Gaussian)
        # noise -- kept as-is to preserve behavior.
        if self.observation_noise_std:
            ob += self.np_random.uniform(low=-self.observation_noise_std,
                                         high=self.observation_noise_std, size=ob.shape)
        return ob, reward, done, info

    def _get_obs(self):
        """
        Build the full observation of the environment.

        Returns:
            numpy.ndarray: 108-dim vector consisting of the position errors of
            the four drones and the load (15), their flattened rotation
            matrices (45), and the linear (24) and angular (24) velocities of
            the drone free joints and the load cable joints.
        """
        qpos = self.sim.data.qpos.copy()
        qvel = self.sim.data.qvel.copy()

        # Overwrite the free-joint translations with world-frame COM positions.
        for name, start in (("x2_0", 0), ("x2_1", 11), ("x2_2", 22),
                            ("x2_3", 33), ("load", 44)):
            qpos[start:start + 3] = self.get_body_com(name)[:3].copy()

        # Position errors relative to the goal. The +-1.414 / +-1 terms offset
        # each drone's target to hold the plus formation around the load.
        # NOTE(review): these magic constants presumably encode the formation
        # geometry -- confirm against the MJCF model.
        e_pos = np.concatenate([
            qpos[0:3] - self.desired_position * 1.414 + 1,
            qpos[11:14] - self.desired_position * (-1.414) - 1,
            qpos[22:25] - self.desired_position * (-1.414) + 1,
            qpos[33:36] - self.desired_position * 1.414 - 1,
            qpos[44:47] - self.desired_position,
        ])

        # Remember the previous body quaternions before reading new ones.
        if self.current_x2_0_quat is not None:
            self.previous_x2_0_quat = self.current_x2_0_quat.copy()
        if self.current_x2_1_quat is not None:
            self.previous_x2_1_quat = self.current_x2_1_quat.copy()
        if self.current_x2_2_quat is not None:
            self.previous_x2_2_quat = self.current_x2_2_quat.copy()
        if self.current_x2_3_quat is not None:
            self.previous_x2_3_quat = self.current_x2_3_quat.copy()

        x2_0_quat = np.array(qpos[3:7])
        x2_1_quat = np.array(qpos[14:18])
        x2_2_quat = np.array(qpos[25:29])
        # BUG FIX: was qpos[29:33], which is drone 2's cable-joint quaternion.
        # Drone 3's body quaternion lives at qpos[36:40] (see the qpos layout
        # and the same slice used in get_reward()).
        x2_3_quat = np.array(qpos[36:40])
        load_quat = np.array(qpos[47:51])

        self.current_x2_0_quat = np.array(x2_0_quat)
        self.current_x2_1_quat = np.array(x2_1_quat)
        self.current_x2_2_quat = np.array(x2_2_quat)
        self.current_x2_3_quat = np.array(x2_3_quat)
        self.current_load_quat = np.array(load_quat)

        # Rotation matrices of the five bodies, stacked then flattened (45,).
        rot_mats = np.concatenate([
            utils.quat2rot(q)
            for q in (x2_0_quat, x2_1_quat, x2_2_quat, x2_3_quat, load_quat)
        ])

        # Linear and angular velocities of every free/cable joint (24 + 24).
        lin_vel = np.concatenate([
            np.array(self.sim.data.get_joint_qvel(name)[:3])
            for name in self._JOINT_NAMES
        ])
        ang_vel = np.concatenate([
            np.array(self.sim.data.get_joint_qvel(name)[3:6])
            for name in self._JOINT_NAMES
        ])

        self.mujoco_qpos = np.array(qpos)
        self.mujoco_qvel = np.array(qvel)

        return np.concatenate([e_pos, rot_mats.flatten(), lin_vel, ang_vel])

    def get_reward(self, ob, a):
        """
        Evaluate the reward for an observation/action pair.

        Args:
            ob (numpy.ndarray): Observation vector (108-dim, see ``_get_obs``).
            a (numpy.ndarray): Clipped action vector.

        Returns:
            tuple[float, dict]: Tuple containing, in order:
                - reward (float): Scalar reward.
                - reward_info (dict): Per-component rewards keyed by
                  (position, orientation, action, alive_bonus, extra_bonus,
                  extra_penalty, velocity_towards_goal, all).
        """
        alive_bonus = self.reward_for_staying_alive

        # Position-error slices in the observation: drones at 0:3, 3:6, 6:9,
        # 9:12 and the load at 12:15 (see _get_obs layout).
        # BUG FIX: the original indexed ob with qpos offsets (11:14, 22:25,
        # 33:36, 44:47) and double-counted drone 1 in the position sum.
        pos_errors = [ob[3 * i: 3 * i + 3] for i in range(5)]
        reward_position = sum(self.norm(e) for e in pos_errors) \
            * (-self.position_reward_constant)

        # Body quaternions read directly from qpos (correct qpos offsets).
        qpos = self.sim.data.qpos
        body_quats = (qpos[3:7], qpos[14:18], qpos[25:29],
                      qpos[36:40], qpos[47:51])
        reward_orientation = sum(self.orientation_error(q) for q in body_quats) \
            * (-self.orientation_reward_constant)

        reward_action = self.norm(a) * (-self.action_reward_constant)

        # Extra bonus for each body that reaches its goal.
        extra_bonus = sum(self.bonus_reward_to_achieve_goal(e)
                          for e in pos_errors)

        # Penalty for bound violation of each body.
        # BUG FIX: the original mixed bonus_reward_to_achieve_goal terms into
        # the bound-violation penalty.
        extra_penalty = -sum(self.bound_violation_penalty(e)
                             for e in pos_errors)

        # Velocity-towards-goal shaping is currently disabled.
        reward_velocity_towards_goal = 0.0

        rewards = (reward_position, reward_orientation,
                   reward_action, alive_bonus, extra_bonus, extra_penalty,
                   reward_velocity_towards_goal)
        reward = sum(rewards) * self.reward_scaling_coefficient

        reward_info = dict(
            position=reward_position,
            orientation=reward_orientation,
            action=reward_action,
            alive_bonus=alive_bonus,
            extra_bonus=extra_bonus,
            extra_penalty=extra_penalty,
            velocity_towards_goal=reward_velocity_towards_goal,
            all=rewards
        )

        return reward, reward_info

    def initialize_robot(self, randomize=True):
        """
        Produce the initial state of the robots in simulation.

        Args:
            randomize (bool): Accepted for interface compatibility; the
                initial state is currently deterministic regardless of this
                flag.

        Returns:
            tuple[numpy.ndarray, numpy.ndarray]: Tuple containing, in order:
                - qpos_init (numpy.ndarray): Initial generalized positions,
                  shape (67,) -- four drones (11 entries each) plus the load
                  (23 entries), all quaternions identity.
                - qvel_init (numpy.ndarray): Initial generalized velocities,
                  shape (54,), all zero.
        """
        qpos_init = np.array(
                [-0.45, -0.35, 2.52, 1., 0., 0., 0.,1., 0., 0., 0.,
                 0.45, -0.35, 2.52, 1., 0., 0., 0.,1., 0., 0., 0.,
                 0.45, 0.35, 2.52, 1., 0., 0., 0.,1., 0., 0., 0.,
                 -0.45, 0.35, 2.52, 1., 0., 0., 0.,1., 0., 0., 0.,
                 0., 0., 0.425, 1., 0., 0., 0.,1., 0., 0., 0.,1., 0., 0., 0.,1., 0., 0., 0.,1., 0., 0., 0.])

        qvel_init = np.zeros((54,))
        return qpos_init, qvel_init

    def reset_model(self):
        """
        Reset the environment robot model.

        Returns:
            numpy.ndarray: Observation vector to be used at the beginning of
            each episode.
        """
        self._time = 0
        qpos_init, qvel_init = self.initialize_robot(
            randomize=self.randomize_reset)
        self.set_state(qpos_init, qvel_init)
        observation = self._get_obs()
        return observation
