#!/usr/bin/env python

from numpy.linalg import norm
import numpy as np
import os
import time
import subprocess
import math
from math import  pow, pi
import time
import Tkinter as tk
from PIL import ImageTk, Image
np.random.seed(1)
PhotoImage = ImageTk.PhotoImage
UNIT = 30  # pixels
HEIGHT = 21  # grid height
WIDTH = 21  # grid width

class Human(object):
    """A 2D agent with position, velocity, goal, and a kinematic model.

    The agent is 'holonomic' by default (actions carry vx/vy); otherwise a
    unicycle model is used (actions carry a speed v and a rotation r).
    """

    def __init__(self):
        # Pose, goal, and velocity are unset until set() is called.
        self.px = None
        self.py = None
        self.gx = None
        self.gy = None
        self.vx = None
        self.vy = None
        self.radius = None
        self.theta = None
        # Fixed: initialize v_pref so get_full_state() cannot raise
        # AttributeError when set() is called without v_pref.
        self.v_pref = None
        self.time_step = 0.25  # seconds of simulated time per step
        self.kinematics = 'holonomic'

    def set(self, px, py, gx, gy, vx, vy, theta, radius=None, v_pref=None):
        """Set the agent state; radius and v_pref are updated only when given."""
        self.px = px
        self.py = py
        self.gx = gx
        self.gy = gy
        self.vx = vx
        self.vy = vy
        self.theta = theta
        if radius is not None:
            self.radius = radius
        if v_pref is not None:
            self.v_pref = v_pref

    def check_validity(self, action):
        """Verify that *action* matches this agent's kinematic model.

        Fixed: the original code called check_validity() without ever
        defining it, so every step()/compute_position() call raised
        AttributeError. Holonomic actions must expose vx/vy; unicycle
        actions must expose v/r.
        """
        if self.kinematics == 'holonomic':
            if not (hasattr(action, 'vx') and hasattr(action, 'vy')):
                raise ValueError('holonomic action must have vx and vy')
        else:
            if not (hasattr(action, 'v') and hasattr(action, 'r')):
                raise ValueError('unicycle action must have v and r')

    def get_observable_state(self):
        """Return (px, py, vx, vy, radius) — the state visible to others."""
        return (self.px, self.py, self.vx, self.vy, self.radius)

    def compute_position(self, action, delta_t):
        """Return the (px, py) reached by applying *action* for *delta_t*.

        Does not mutate the agent state. Fixed: the original computed the
        position but had no return statement, so callers unpacked None.
        """
        self.check_validity(action)
        if self.kinematics == 'holonomic':
            px = self.px + action.vx * delta_t
            py = self.py + action.vy * delta_t
        else:
            theta = self.theta + action.r
            px = self.px + np.cos(theta) * action.v * delta_t
            py = self.py + np.sin(theta) * action.v * delta_t
        return px, py

    def get_next_observable_state(self, action):
        """Return the ObservableState one time step after applying *action*."""
        self.check_validity(action)
        pos = self.compute_position(action, self.time_step)
        next_px, next_py = pos
        if self.kinematics == 'holonomic':
            next_vx = action.vx
            next_vy = action.vy
        else:
            next_theta = self.theta + action.r
            next_vx = action.v * np.cos(next_theta)
            next_vy = action.v * np.sin(next_theta)
        return ObservableState(next_px, next_py, next_vx, next_vy, self.radius)

    def get_full_state(self):
        """Return the FullState including goal, preferred speed, and heading."""
        return FullState(self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)

    def get_position(self):
        """Return (px, py)."""
        return self.px, self.py

    def set_position(self, position):
        """Set (px, py) from an indexable pair."""
        self.px = position[0]
        self.py = position[1]

    def get_goal_position(self):
        """Return (gx, gy)."""
        return self.gx, self.gy

    def get_velocity(self):
        """Return (vx, vy)."""
        return self.vx, self.vy

    def set_velocity(self, velocity):
        """Set (vx, vy) from an indexable pair."""
        self.vx = velocity[0]
        self.vy = velocity[1]

    def step(self, action):
        """Perform *action* for one time step and update position/velocity."""
        self.check_validity(action)
        pos = self.compute_position(action, self.time_step)
        self.px, self.py = pos
        if self.kinematics == 'holonomic':
            self.vx = action.vx
            self.vy = action.vy
        else:
            # keep heading in [0, 2*pi)
            self.theta = (self.theta + action.r) % (2 * np.pi)
            self.vx = action.v * np.cos(self.theta)
            self.vy = action.v * np.sin(self.theta)

    def reached_destination(self):
        """True when the goal lies within the agent's own radius."""
        return norm(np.array(self.get_position()) - np.array(self.get_goal_position())) < self.radius

class Env(tk.Tk):
    def __init__(self):
        """Set up the Tk window, sprite images, canvas, and episode parameters."""
        tk.Tk.__init__(self)
        self.title('LSTM_RL')
        # The geometry string is "widthxheight"; use WIDTH for the first
        # field (same numeric value as HEIGHT here, so the window size is
        # unchanged, but the intent is now correct).
        self.geometry('{0}x{1}'.format(WIDTH * UNIT, HEIGHT * UNIT))
        self.shapes = self.load_images()
        self.canvas = self._build_canvas()
        self.texts = []
        # Fixed: the following three lines were tab-indented while the rest
        # of the method uses spaces — a TabError under Python 3.
        self.act = 0
        self.total_x = 0
        self.total_y = 0
        self.human_num = 8
        self.time_step = 0.25  # seconds of simulated time per step
        self.robot = None
        self.humans = None
        # Reward-shaping parameters used by step().
        self.success_reward = 1
        self.collision_penalty = -0.5
        self.discomfort_dist = 0.1
        self.discomfort_penalty_factor = 0.2
        self.time_limit = 300

    def _build_canvas(self):
        """Build the grid canvas and place every sprite at its start pixel."""
        canvas = tk.Canvas(self, bg='aliceblue',
                           height=HEIGHT * UNIT,
                           width=WIDTH * UNIT)
        # Draw the cell grid, one line every UNIT pixels.
        for x in range(0, WIDTH * UNIT, UNIT):
            canvas.create_line(x, 0, x, HEIGHT * UNIT)
        for y in range(0, HEIGHT * UNIT, UNIT):
            canvas.create_line(0, y, HEIGHT * UNIT, y)

        # Robot sprite.
        self.rectangle = canvas.create_image(315, 345, image=self.shapes[0])
        # Four human sprites.
        tri_spots = ((195, 195), (195, 435), (435, 195), (435, 435))
        self.triangle1, self.triangle2, self.triangle3, self.triangle4 = (
            canvas.create_image(cx, cy, image=self.shapes[1]) for cx, cy in tri_spots)
        # Goal sprite.
        self.circle = canvas.create_image(525, 105, image=self.shapes[2])
        # Four yellow obstacle sprites.
        box_spots = ((255, 285), (285, 285), (345, 225), (345, 255))
        (self.yellow_rectangle1, self.yellow_rectangle2,
         self.yellow_rectangle3, self.yellow_rectangle4) = (
            canvas.create_image(cx, cy, image=self.shapes[3]) for cx, cy in box_spots)

        canvas.pack()
        return canvas


    def load_images(self, img_dir='/home/wangqiang/catkin_ws/src/turtlebot3_machine_learning/turtlebot3_dqn/img'):
        """Load and resize the four sprite images.

        Generalized: the image directory is now a parameter (defaulting to
        the original hard-coded absolute path, so existing callers are
        unaffected) and the four identical load expressions are folded into
        one helper.

        Returns:
            tuple: (rectangle, triangle, circle, yellow_rectangle)
            PhotoImage objects, each resized to 20x20 pixels.
        """
        def _load(filename):
            # One shared loader instead of four copies of the same expression.
            return PhotoImage(Image.open(os.path.join(img_dir, filename)).resize((20, 20)))

        return (_load('rectangle.png'),
                _load('triangle.png'),
                _load('circle.png'),
                _load('Yellow_Rectangle.png'))



    def coords_to_state(self, coords):
        """Map canvas pixel coordinates to [col, row] grid indices."""
        # Cell centers sit at 15 + 30*k pixels, so invert that mapping.
        col = int((coords[0] - 15) / 30)
        row = int((coords[1] - 15) / 30)
        return [col, row]

    def state_to_coords(self, state):
        """Map [col, row] grid indices to canvas pixel coordinates."""
        # Each cell is 30 px wide; +15 lands on the cell center.
        return [int(state[0] * 30 + 15), int(state[1] * 30 + 15)]

    def set_robot(self, robot):
        """Attach the robot agent that reset()/step() will drive."""
        self.robot = robot

    def reset(self):
        """Reset sprites and agent states; return the initial observation."""
        self.flag = 0
        self.global_time = 0
        self.human_times = [0] * self.human_num
        time.sleep(0.1)
        # Snap each sprite back onto its start cell (same targets as before:
        # robot at (300, 330), the four human triangles at the corners).
        start_pixels = ((self.rectangle, 300, 330),
                        (self.triangle1, 180, 180),
                        (self.triangle2, 180, 420),
                        (self.triangle3, 420, 180),
                        (self.triangle4, 420, 420))
        for item, tx, ty in start_pixels:
            cx, cy = self.canvas.coords(item)
            self.canvas.move(item, UNIT / 2 - cx + tx, UNIT / 2 - cy + ty)
        self.render()

        self.robot.set(11, 12, 18, 4, 0, 0, np.pi / 2, 0.4, 1)
        # Initial (px, py, gx, gy, vx, vy) per human; the last four are static.
        initial_states = ((7, 7, 7, 15, 0, 0.5),
                          (7, 15, 15, 15, 0.5, 0),
                          (15, 7, 7, 7, -0.5, 0),
                          (15, 15, 15, 7, 0, -0.5),
                          (9, 11, 9, 11, 0, 0),
                          (10, 11, 10, 11, 0, 0),
                          (12, 8, 12, 8, 0, 0),
                          (12, 9, 12, 9, 0, 0))
        self.humans = [Human() for _ in range(self.human_num)]
        for human, (px, py, gx, gy, vx, vy) in zip(self.humans, initial_states):
            human.set(px, py, gx, gy, vx, vy, np.pi / 2, 0.4)

        return [human.get_observable_state() for human in self.humans]
    def onestep_lookahead(self, action):
        """Evaluate *action* without mutating the environment (update=False)."""
        return self.step(action, update=False)

    def step(self, action, update = True):
        """Advance the simulation by one time step.

        Args:
            action: robot action; holonomic actions expose ``vx``/``vy``,
                unicycle actions expose ``v``/``r``.
            update: when True, move all agents and the canvas sprites;
                when False, only evaluate the action (one-step lookahead).

        Returns:
            tuple: (ob, reward, done, flag) — ob is the list of human
            observable states; flag encodes the outcome
            (0 normal, 1 timeout, 2 collision, 3 goal, 4 discomfort).
        """
        # Fixed: the original stored raw (vx, vy) tuples that Human.step()
        # cannot consume (it reads .vx/.vy), sized the list wrongly with
        # [0,0]*human_num, and iterated "for i in self.human_num" (an int).
        from collections import namedtuple
        ActionXY = namedtuple('ActionXY', ['vx', 'vy'])
        # Humans keep their current velocity for this step.
        human_actions = [ActionXY(*human.get_velocity()) for human in self.humans]

        # Collision check: closest boundary distance between the robot and
        # each human over one step, assuming constant relative velocity.
        dmin = float('inf')
        collision = False
        for i, human in enumerate(self.humans):
            px = human.px - self.robot.px
            py = human.py - self.robot.py
            if self.robot.kinematics == 'holonomic':
                vx = human.vx - action.vx
                vy = human.vy - action.vy
            else:
                vx = human.vx - action.v * np.cos(action.r + self.robot.theta)
                vy = human.vy - action.v * np.sin(action.r + self.robot.theta)
            ex = px + vx * self.time_step
            ey = py + vy * self.time_step
            # Fixed: norm(ex, ey) passed ey as the "ord" argument; we need
            # the Euclidean norm of the relative-position vector.
            closest_dist = norm((ex, ey)) - human.radius - self.robot.radius
            if closest_dist <= 0:
                collision = True
                break
            elif closest_dist < dmin:
                dmin = closest_dist

        # Goal check: position the robot would reach with this action.
        end_position = np.array(self.robot.compute_position(action, self.time_step))
        reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius

        if self.global_time >= self.time_limit - 1:
            reward = 0
            done = True
            flag = 1
            print('Timeout')
        elif collision:
            reward = self.collision_penalty
            done = True
            flag = 2
            print('Collision')
        elif reaching_goal:
            reward = self.success_reward
            done = True
            flag = 3
            print('ReachGoal')
        elif dmin < self.discomfort_dist:
            reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor
            done = False
            flag = 4
            # Fixed: print(Danger(%0.4f),dmin) was a syntax error.
            print('Danger(%0.4f)' % dmin)
        else:
            reward = 0
            done = False
            # Fixed: flag was unbound on this path (UnboundLocalError at
            # the return), and the message was misspelled 'Nomal'.
            flag = 0
            print('Normal')

        if update:
            # Update all agents.
            self.robot.step(action)
            for human, human_action in zip(self.humans, human_actions):
                human.step(human_action)
            self.global_time += self.time_step
            for i, human in enumerate(self.humans):
                # Only record the first time the human reaches the goal.
                if self.human_times[i] == 0 and human.reached_destination():
                    self.human_times[i] = self.global_time

            self.render()
            # Fixed: the original int array truncated sub-pixel robot motion.
            # Canvas y grows downward, hence the negated vy.
            base_action = np.array([action.vx * UNIT, -action.vy * UNIT], dtype=float)
            self.canvas.move(self.rectangle, base_action[0], base_action[1])
            self.canvas.tag_raise(self.rectangle)
            # NOTE(review): human sprites move by vx * time_step pixels while
            # the robot is scaled by UNIT — looks inconsistent, preserved
            # as-is; confirm the intended display scale.
            triangles = (self.triangle1, self.triangle2, self.triangle3, self.triangle4)
            for human, item in zip(self.humans, triangles):
                self.canvas.move(item, human.vx * self.time_step, human.vy * self.time_step)
                self.canvas.tag_raise(item)

            # Compute the observation after the update.
            ob = [human.get_observable_state() for human in self.humans]
        else:
            ob = [human.get_next_observable_state(act)
                  for human, act in zip(self.humans, human_actions)]

        return ob, reward, done, flag

    def render(self):
        time.sleep(0.03)