import os
from gym import spaces
import numpy as np
from numpy.core.records import array
import pybullet as p
from .env import RobotEnv

import time
class ReachingEnv(RobotEnv):
    """Gym-style reaching task: a Jaco arm must bring its gripper to a target object.

    The reward is potential-based shaping on the gripper-to-target distance
    (progress toward the target is rewarded) plus a small orientation-alignment
    term. An episode terminates on success (distance <= 0.04 m, +100 bonus) or
    after 250 simulation steps.
    """

    # Absolute path to the target-object URDF.
    # NOTE(review): machine-specific path — consider making it configurable.
    OBJECT_URDF = "/home/maxwene/project/kinova_jaco6/reaching_jaco/new_jaco/object_demo.urdf"

    def __init__(self, robot_type='jaco'):
        super(ReachingEnv, self).__init__(robot_type=robot_type, frame_skip=2, time_step=0.02, action_robot_len=12, obs_robot_len=31)
        (self.robot, self.robot_lower_limits, self.robot_upper_limits,
         self.robot_right_arm_joint_indices, self.robot_left_arm_joint_indices) = self.world_creation.create_new_world(print_joints=False)

        # Target object the gripper should reach.
        self.ObjectID = p.loadURDF(self.OBJECT_URDF, [-0.35, -0.355, 0.2], globalScaling=0.004)
        self.robot_forces = 1.0
        self.robot_gains = 0.05
        self.distance_weight = 10.0
        self.task_success_threshold = 0.03

    def step(self, action):
        """Apply `action`, advance the simulation, and return (obs, reward, done, info)."""
        # Execute the action through the base-class controller.
        self.take_step(action, gains=self.robot_gains)

        # Gripper pose (link 8). The z-offset shifts the reference point toward
        # the fingertips. NOTE(review): step() uses -0.08 while _get_obs() uses
        # -0.06 — confirm which offset is intended.
        gripper_pos, gripper_orient = p.getLinkState(self.robot, 8, computeForwardKinematics=True, physicsClientId=self.id)[:2]
        gripper_pos = np.array(gripper_pos)
        gripper_pos[2] -= 0.08

        reward_distance_target = np.linalg.norm(gripper_pos - self.target_pos)
        reward_orient_target = np.linalg.norm(np.array(gripper_orient) - np.array((1, 0, 0, 0)))
        reward_target_target = np.linalg.norm(self.target_pos - self.target_pos_real)

        # Reward shaping: distance potential (weighted 100x) plus a small
        # orientation-norm potential; both reward progress since the last step.
        reward = (reward_distance_target - self.pre_distance) * (-100) + (reward_orient_target - self.pre_ori) * (-1)
        self.pre_distance = reward_distance_target
        self.pre_ori = reward_orient_target
        self.pre_target = reward_target_target

        obs = self._get_obs()
        # Bug fix: success was previously tested on the *negated* distance
        # (-distance <= threshold), which is always true, so task_success was
        # reported as 1 on every step.
        info = {'task_success': int(reward_distance_target <= self.task_success_threshold),
                'action_robot_len': self.action_robot_len,
                'obs_robot_len': self.obs_robot_len}
        done = False
        # Penalize the gripper dipping below z = 0 (the table/ground plane).
        if gripper_pos[2] < 0:
            reward -= 10

        self.steps += 1
        if reward_distance_target <= 0.04:
            # Success: one-off bonus and terminate the episode.
            reward += 100
            done = True
        if self.steps > 249:
            # Episode time limit (250 steps).
            done = True
        return obs, reward, done, info

    def _get_obs(self):
        """Build and return the 31-dim observation vector.

        Layout: gripper-torso offset (3), gripper-target offset (3),
        gripper pos (3), target pos (3), gripper pos again (3 —
        NOTE(review): looks duplicated, but removing it would break
        obs_robot_len=31), joint (position, velocity) pairs (12),
        gripper orientation quaternion (4).
        """
        torso_pos = np.array(p.getLinkState(self.robot, 0, computeForwardKinematics=True, physicsClientId=self.id)[0])
        # Each joint contributes a (position, velocity) pair.
        robot_joint_positions = np.array([[x[0], x[1]] for x in p.getJointStates(self.robot, jointIndices=self.robot_right_arm_joint_indices, physicsClientId=self.id)])
        # Refresh the target pose from the simulator on every observation.
        self.target_pos, self.target_orient = p.getBasePositionAndOrientation(self.ObjectID)
        self.target_pos, self.target_orient = np.array(self.target_pos), np.array(self.target_orient)
        gripper_pos, gripper_orient = p.getLinkState(self.robot, 8, computeForwardKinematics=True, physicsClientId=self.id)[:2]
        gripper_pos = np.array(gripper_pos)
        gripper_pos[2] -= 0.06
        robot_obs = np.concatenate([gripper_pos - torso_pos, gripper_pos - self.target_pos, gripper_pos,
                                    self.target_pos, gripper_pos, robot_joint_positions.ravel(),
                                    gripper_orient]).ravel()
        return robot_obs

    def reset(self):
        """Reset the arm and the target object; return the initial observation."""
        self.target_pos_real = np.array([-0.35, 0, 0.0])
        self.steps = 0
        if self.gui:
            # In GUI mode rebuild the whole world so the rendering state is fresh.
            (self.robot, self.robot_lower_limits, self.robot_upper_limits,
             self.robot_right_arm_joint_indices, self.robot_left_arm_joint_indices) = self.world_creation.create_new_world(print_joints=False)
            self.ObjectID = p.loadURDF(self.OBJECT_URDF, [-0.35, -0.355, 0.1], globalScaling=0.004)

        p.setGravity(0, 0, -9.8)
        p.resetBasePositionAndOrientation(self.ObjectID, [-0.35, -0.355, 0.2], [0, 0, 0, 1])
        self.target_pos, self.target_orient = p.getBasePositionAndOrientation(self.ObjectID)

        # Move the Jaco arm to its starting configuration (task-optimal pose).
        self.position_robot_toc(self.robot, 12, self.robot_left_arm_joint_indices, self.robot_lower_limits, self.robot_upper_limits, pos_offset=np.array([0, 0, 0]))

        self.world_creation.set_gripper_open_position(self.robot, position=0, left=True, set_instantly=True)
        # Re-enable rendering (typically disabled during world construction).
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)

        # Initialize the potentials used for reward shaping in step().
        # NOTE(review): no fingertip z-offset is applied here, unlike step() —
        # confirm whether the initial potential should use the offset pose.
        gripper_pos, gripper_orient = p.getLinkState(self.robot, 8, computeForwardKinematics=True, physicsClientId=self.id)[:2]
        self.pre_distance = np.linalg.norm(np.array(gripper_pos) - np.array(self.target_pos))
        self.pre_ori = np.linalg.norm(np.array(gripper_orient) - np.array((1, 0, 0, 0)))
        self.pre_target = np.linalg.norm(np.array(self.target_pos) - self.target_pos_real)
        return self._get_obs()

    def reset_target(self):
        """Placeholder for target randomization; the target is fixed in this task."""
        pass

