#!/usr/bin/env python

from state import ObservableState, FullState
from numpy.linalg import norm
import numpy as np
from collections import namedtuple
import os
import time
import subprocess
import math
#import tf
import rospy
from math import  pow, pi, sqrt, atan2
import time
from geometry_msgs.msg import Twist, Point, Pose, Quaternion
#from tf.transformations import euler_from_quaternion, quaternion_from_euler

# Velocity command for a holonomic agent: x/y velocity components.
ActionXY = namedtuple('ActionXY', ['vx', 'vy'])
# Command for a non-holonomic (unicycle) agent: forward speed v and a
# heading increment r (added to theta in Human.compute_position/step).
ActionRot = namedtuple('ActionRot', ['v', 'r'])

class Human(object):
    """Point agent with position, velocity, goal and simple kinematics.

    Two kinematic models are supported, selected by ``self.kinematics``:
    ``'holonomic'`` agents take ``ActionXY`` (vx, vy) commands; anything
    else is treated as a unicycle taking ``ActionRot`` (v, r) commands.
    """

    def __init__(self):
        self.px = None          # current position x
        self.py = None          # current position y
        self.gx = None          # goal position x
        self.gy = None          # goal position y
        self.vx = None          # current velocity x
        self.vy = None          # current velocity y
        self.radius = None      # collision radius
        self.theta = None       # heading, used by the non-holonomic model
        # Robustness fix: v_pref was previously never initialised, so
        # get_full_state() raised AttributeError unless set() supplied it.
        self.v_pref = None      # preferred speed
        self.time_step = 0.25   # simulation step [s]
        self.kinematics = 'holonomic'

    def set(self, px, py, gx, gy, vx, vy, theta, radius=None, v_pref=None):
        """Initialise the agent state.

        radius and v_pref keep their previous values when omitted.
        """
        self.px = px
        self.py = py
        self.gx = gx
        self.gy = gy
        self.vx = vx
        self.vy = vy
        self.theta = theta
        if radius is not None:
            self.radius = radius
        if v_pref is not None:
            self.v_pref = v_pref

    def get_observable_state(self):
        """Return the state visible to other agents (position, velocity, radius)."""
        return ObservableState(self.px, self.py, self.vx, self.vy, self.radius)

    def compute_position(self, action, delta_t):
        """Return the (px, py) reached after applying `action` for `delta_t` seconds."""
        self.check_validity(action)
        if self.kinematics == 'holonomic':
            px = self.px + action.vx * delta_t
            py = self.py + action.vy * delta_t
        else:
            theta = self.theta + action.r
            px = self.px + np.cos(theta) * action.v * delta_t
            py = self.py + np.sin(theta) * action.v * delta_t
        return px, py

    def get_next_observable_state(self, action):
        """Return the observable state one time step ahead, without mutating self."""
        self.check_validity(action)
        next_px, next_py = self.compute_position(action, self.time_step)
        if self.kinematics == 'holonomic':
            next_vx = action.vx
            next_vy = action.vy
        else:
            next_theta = self.theta + action.r
            next_vx = action.v * np.cos(next_theta)
            next_vy = action.v * np.sin(next_theta)
        return ObservableState(next_px, next_py, next_vx, next_vy, self.radius)

    def get_full_state(self):
        """Return the full state including goal, preferred speed and heading."""
        return FullState(self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)

    def get_position(self):
        return self.px, self.py

    def set_position(self, position):
        self.px = position[0]
        self.py = position[1]

    def get_goal_position(self):
        return self.gx, self.gy

    def get_velocity(self):
        return self.vx, self.vy

    def set_velocity(self, velocity):
        self.vx = velocity[0]
        self.vy = velocity[1]

    def check_validity(self, action):
        """Assert that the action type matches the kinematic model."""
        if self.kinematics == 'holonomic':
            assert isinstance(action, ActionXY)
        else:
            assert isinstance(action, ActionRot)

    def delay_distance(self, action, time_consuming):
        """Advance the position by `action`'s velocity over `time_consuming` seconds.

        Used to compensate humans for wall-clock time spent in the blocking
        robot controller. Bug fix: the original added the whole action tuple
        to the scalar coordinates (``self.px + action * time_consuming``),
        which raises TypeError; the per-axis components are used instead.
        """
        self.px = self.px + action.vx * time_consuming
        self.py = self.py + action.vy * time_consuming

    def step(self, action):
        """Perform an action and update position, velocity and heading."""
        self.check_validity(action)
        self.px, self.py = self.compute_position(action, self.time_step)
        if self.kinematics == 'holonomic':
            self.vx = action.vx
            self.vy = action.vy
        else:
            self.theta = (self.theta + action.r) % (2 * np.pi)
            self.vx = action.v * np.cos(self.theta)
            self.vy = action.v * np.sin(self.theta)

    def reached_destination(self):
        """Return True when the agent's center is within `radius` of its goal."""
        return norm(np.array(self.get_position()) - np.array(self.get_goal_position())) < self.radius

class Env():
    """Crowd-navigation environment driving a robot base through ROS.

    Holds one controllable robot and `human_num` scripted humans, publishes
    velocity commands on the `cmd_vel` topic, and exposes a gym-like
    reset() / step(action) interface returning (ob, reward, done, flag).

    NOTE(review): `tf` is used below but its import is commented out at the
    top of this file, and `self.get_odom()` is called in control() but is not
    defined in the visible portion of this file. Both will fail at runtime
    as shown -- confirm the missing pieces exist elsewhere or restore them.
    """

    def __init__(self):
        self.act = 0
        self.total_x = 0
        self.total_y = 0
        self.human_num = 8          # number of scripted humans
        self.time_step = 0.25       # simulation step [s]
        self.robot = None
        self.humans = None
        # Reward-shaping constants.
        self.success_reward = 1
        self.collision_penalty = -0.5
        self.discomfort_dist = 0.1              # distance considered "too close"
        self.discomfort_penalty_factor = 0.2
        self.time_limit = 300       # episode timeout [s]
        self.global_time = 0
        rospy.on_shutdown(self.shutdown)
        self.cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=5)
        # (Unused local Point/Twist/Rate objects that had no effect were
        # removed here; control() creates its own.)
        self.tf_listener = tf.TransformListener()
        self.odom_frame = 'odom'

    def shutdown(self):
        """Stop the robot by publishing a zero velocity command."""
        self.cmd_vel.publish(Twist())
        rospy.sleep(0.01)

    def set_robot(self, robot):
        """Attach the controllable robot agent."""
        self.robot = robot

    def control(self, x, y, z):
        """Drive the robot base to (x, y) with final heading z (degrees).

        Blocking closed-loop controller in two phases: first rotate in place
        toward heading z, then run a proportional controller on distance and
        bearing until within 4 cm of the goal point.
        """
        (position, rotation) = self.get_odom()
        last_rotation = 0

        # Controller gains.
        angular_speed = 1
        linear_speed = 1
        # NOTE(review): this re-initialisation discards the odometry pose read
        # just above (goal_distance below is measured from a zeroed Point) and
        # re-creates the tf listener; looks like copied-in setup code -- verify.
        position = Point()
        move_cmd = Twist()
        r = rospy.Rate(10)
        self.tf_listener = tf.TransformListener()
        self.odom_frame = 'odom'

        # Resolve the robot base frame name, which differs across robot models.
        try:
            self.tf_listener.waitForTransform(self.odom_frame, 'base_footprint', rospy.Time(), rospy.Duration(1.0))
            self.base_frame = 'base_footprint'
        except (tf.Exception, tf.ConnectivityException, tf.LookupException):
            try:
                self.tf_listener.waitForTransform(self.odom_frame, 'base_link', rospy.Time(), rospy.Duration(1.0))
                self.base_frame = 'base_link'
            except (tf.Exception, tf.ConnectivityException, tf.LookupException):
                rospy.loginfo("Cannot find transform between odom and base_link or base_footprint")
                rospy.signal_shutdown("tf Execption")

        (goal_x, goal_y, goal_z) = (float(x), float(y), float(z))
        if goal_z > 180 or goal_z < -180:
            print("input wrong goal_z range")
        # NOTE(review): shutdown() publishes a zero Twist unconditionally here;
        # it was probably intended to run only inside the `if` above -- confirm.
        self.shutdown()

        goal_z = np.deg2rad(goal_z)

        goal_distance = sqrt(pow(goal_x - position.x, 2) + pow(goal_y - position.y, 2))
        distance = goal_distance

        # Phase 1: rotate in place until the heading error is below 0.02 rad,
        # always turning the shorter way around.
        while abs(rotation - goal_z) > 0.02:
            (position, rotation) = self.get_odom()
            if goal_z >= 0:
                if rotation <= goal_z and rotation >= goal_z - pi:
                    move_cmd.linear.x = 0.00
                    move_cmd.angular.z = 0.5
                else:
                    move_cmd.linear.x = 0.00
                    move_cmd.angular.z = -0.5
            else:
                if rotation <= goal_z + pi and rotation > goal_z:
                    move_cmd.linear.x = 0.00
                    move_cmd.angular.z = -0.5
                else:
                    move_cmd.linear.x = 0.00
                    move_cmd.angular.z = 0.5
            self.cmd_vel.publish(move_cmd)
            r.sleep()

        self.cmd_vel.publish(Twist())

        # Phase 2: proportional control on distance and bearing to the goal.
        while distance > 0.04:
            (position, rotation) = self.get_odom()
            x_start = position.x
            y_start = position.y
            path_angle = atan2(goal_y - y_start, goal_x - x_start)

            # Unwrap the bearing so the error term does not jump at +/-pi.
            if path_angle < -pi / 4 or path_angle > pi / 4:
                if goal_y < 0 and y_start < goal_y:
                    path_angle = -2 * pi + path_angle
                elif goal_y >= 0 and y_start > goal_y:
                    path_angle = 2 * pi + path_angle
            if last_rotation > pi - 0.1 and rotation <= 0:
                rotation = 2 * pi + rotation
            elif last_rotation < -pi + 0.1 and rotation > 0:
                rotation = -2 * pi + rotation

            move_cmd.angular.z = angular_speed * path_angle - rotation

            distance = sqrt(pow((goal_x - x_start), 2) + pow((goal_y - y_start), 2))

            # Linear speed proportional to remaining distance, capped at 0.1.
            move_cmd.linear.x = min(linear_speed * distance, 0.1)

            # Clamp the angular command to [-1.5, 1.5].
            if move_cmd.angular.z > 0:
                move_cmd.angular.z = min(move_cmd.angular.z, 1.5)
            else:
                move_cmd.angular.z = max(move_cmd.angular.z, -1.5)

            last_rotation = rotation
            self.cmd_vel.publish(move_cmd)
            r.sleep()

        (position, rotation) = self.get_odom()
        print("Current position and rotation are: ", (position.x, position.y, rotation))
        print("reached: ^_^")

        # Stop the robot before returning.
        self.cmd_vel.publish(Twist())
        return

    def reset(self):
        """Reset the episode: place the robot and humans, return the observation.

        Returns a list of ObservableState, one per human.
        """
        self.flag = 0
        self.global_time = 0
        self.human_times = [0] * self.human_num
        time.sleep(0.1)

        # Fixed start/goal layout: robot plus four moving and four static humans.
        self.robot.set(11, 12, 18, 4, 0, 0, np.pi / 2, 0.4, 1)
        self.humans = [Human() for _ in range(self.human_num)]
        self.humans[0].set(7, 7, 7, 15, 0, 0.5, np.pi / 2, 0.4)
        self.humans[1].set(7, 15, 15, 15, 0.5, 0, np.pi / 2, 0.4)
        self.humans[2].set(15, 7, 7, 7, -0.5, 0, np.pi / 2, 0.4)
        self.humans[3].set(15, 15, 15, 7, 0, -0.5, np.pi / 2, 0.4)
        self.humans[4].set(9, 11, 9, 11, 0, 0, np.pi / 2, 0.4)
        self.humans[5].set(10, 11, 10, 11, 0, 0, np.pi / 2, 0.4)
        self.humans[6].set(12, 8, 12, 8, 0, 0, np.pi / 2, 0.4)
        self.humans[7].set(12, 9, 12, 9, 0, 0, np.pi / 2, 0.4)

        ob = [human.get_observable_state() for human in self.humans]

        return ob

    def onestep_lookahead(self, action):
        """Evaluate `action` without mutating the environment state."""
        return self.step(action, update=False)

    def step(self, action, update=True):
        """Advance the environment by one robot action.

        When `update` is True the robot and humans are actually moved (and the
        real base is driven via control()); when False only the hypothetical
        next observation is computed. Returns (ob, reward, done, flag).
        """
        # Humans keep their current velocity for this step.
        human_actions = [ActionXY(*human.get_velocity()) for human in self.humans]

        # Collision check: project the relative motion over one time step and
        # measure the closest boundary-to-boundary distance to each human.
        dmin = float('inf')
        collision = False
        for i, human in enumerate(self.humans):
            px = human.px - self.robot.px
            py = human.py - self.robot.py
            if self.robot.kinematics == 'holonomic':
                vx = human.vx - action.vx
                vy = human.vy - action.vy
            else:
                vx = human.vx - action.v * np.cos(action.r + self.robot.theta)
                vy = human.vy - action.v * np.sin(action.r + self.robot.theta)
            ex = px + vx * self.time_step
            ey = py + vy * self.time_step
            # Closest distance between the boundaries of the two agents.
            closest_dist = sqrt(pow(ex, 2) + pow(ey, 2)) - human.radius - self.robot.radius
            if closest_dist <= 0:
                collision = True
                break
            elif closest_dist < dmin:
                dmin = closest_dist

        # Check whether this action would carry the robot onto its goal.
        end_position = np.array(self.robot.compute_position(action, self.time_step))
        reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius

        if self.global_time >= self.time_limit - 1:
            reward = 0
            done = True
            self.flag = 1
            print('Timeout')
        elif collision:
            reward = self.collision_penalty
            done = True
            self.flag = 2
            print('Collision')
        elif reaching_goal:
            reward = self.success_reward
            done = True
            self.flag = 3
            print('ReachGoal')
        elif dmin < self.discomfort_dist:
            # Penalty grows linearly as the robot intrudes into the comfort zone.
            reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor
            done = False
            self.flag = 4
            # Bug fix: the original passed dmin as a second print argument
            # instead of formatting it into the message.
            print('Danger(%0.4f)' % dmin)
        else:
            reward = 0
            done = False
            # NOTE(review): self.flag is not updated on this branch, so a
            # stale value from reset()/earlier steps is returned -- confirm.
            print('Normal')  # typo fix: was 'Nomal'

        if update:
            # Advance the robot both in the internal model and on the real base.
            self.robot.step(action)
            time_start = time.time()
            px, py = self.robot.get_position()
            # NOTE(review): atan2(vx, vy) measures the heading from the +y
            # axis; confirm this matches control()'s heading convention.
            angle = atan2(action.vx, action.vy)
            z = angle * (180 / pi)
            self.control(px, py, z)
            for human, human_action in zip(self.humans, human_actions):
                human.step(human_action)

            # control() blocks; compensate the humans for the wall-clock time
            # it consumed so they keep moving while the robot travels.
            time_consuming = float(time.time() - time_start)
            for human, human_action in zip(self.humans, human_actions):
                human.delay_distance(human_action, time_consuming)

            self.global_time += self.time_step
            for i, human in enumerate(self.humans):
                # Only record the first time each human reaches its goal.
                if self.human_times[i] == 0 and human.reached_destination():
                    self.human_times[i] = self.global_time

            # Compute the observation. (An unused `base_action` local was removed.)
            ob = [human.get_observable_state() for human in self.humans]
        else:
            ob = [human.get_next_observable_state(act) for human, act in zip(self.humans, human_actions)]

        return ob, reward, done, self.flag

    def render(self):
        """Placeholder render hook; just paces the loop."""
        time.sleep(0.03)
