import numpy as np  # To deal with data in form of matrices
import tkinter as tk  # To build GUI
import time  # Time is needed to slow down the agent and to see how he runs
from PIL import Image, ImageTk  # For adding images into the canvas widget

# --- Build the grid data for the environment from the map file ---

# Parse the map: 'b' marks a free cell (0), 'a' marks an obstacle (-1).
# Use a context manager so the file handle is always closed.
with open('../random-32-32-10.map', 'r') as map_file:
    lines = map_file.readlines()

cells_height = []
for line in lines:
    cell = []
    for ch in line:  # renamed from `str`, which shadowed the builtin
        if ch == 'b':
            cell.append(0)
        if ch == 'a':
            cell.append(-1)
    cells_height.append(cell)
cells_height = np.array(cells_height)
shape = cells_height.shape[0]
pixels = 20  # cell size in pixels

# Use only one half of the whole map (16x16 for the 32x32 input)
env_height = int(len(cells_height) / 2)  # grid height in cells
env_width = int(len(cells_height) / 2)   # grid width in cells

# Indices of the obstacles inside the chosen quadrant (was hard-coded 0:16;
# env_height/env_width give the same slice for this map and generalize).
inx = np.where(cells_height[0:env_height, 0:env_width] == np.array(-1))
# Adding np.array(0) turns the np.where tuple into a 2xN array; rot90 then
# yields one index pair per row -- TODO confirm the resulting axis order.
inx_obstracles = inx + np.array(0)
inx_obstracles = np.rot90(inx_obstracles, -1)

base_action = np.array([0, 0])
b = base_action[1]  # NOTE(review): unused in this file; kept for compatibility

# Global dictionary holding the coordinates of the final route
a = {}
class Environment(tk.Tk, object):
    """Tkinter grid-world environment for a map-based RL agent.

    The agent (pink oval) starts at a fixed cell, the goal is a yellow
    flag, and the blue squares are obstacles read from the map file at
    module load time.  ``step()`` returns (next_state, reward, done)
    like a Gym-style environment.
    """

    def __init__(self):
        super(Environment, self).__init__()
        # NOTE(review): order fixed to match the integer actions used in
        # step(): 0 = up, 1 = down, 2 = right, 3 = left.  The original
        # listing was ['up','down','left','right'], which disagreed with
        # the mapping actually applied in step().
        self.action_space = ['up', 'down', 'right', 'left']
        self.n_actions = len(self.action_space)
        self.title('RL environment map')
        self.geometry('{0}x{1}'.format(env_height * pixels, env_height * pixels))
        self.build_environment()

        # Route bookkeeping:
        self.d = {}  # route of the current episode (step index -> coords)
        self.f = {}  # best (shortest) successful route found so far

        # Running key for self.d
        self.i = 0

        # True until the first successful episode fills self.f
        self.c = True

        # Step counts for the longest / shortest successful routes
        self.longest = 0
        self.shortest = 0

    def build_environment(self):
        """Create the canvas, grid lines, obstacles, agent and flag."""
        self.canvas_widget = tk.Canvas(self, bg='white',
                                       height=env_height * pixels,
                                       width=env_width * pixels)
        # Vertical grid lines
        for column in range(0, env_width * pixels, pixels):
            x0, y0, x1, y1 = column, 0, column, env_height * pixels
            self.canvas_widget.create_line(x0, y0, x1, y1, fill='grey')
        # Horizontal grid lines (the grid is square, so height == width here)
        for row in range(0, env_height * pixels, pixels):
            x0, y0, x1, y1 = 0, row, env_height * pixels, row
            self.canvas_widget.create_line(x0, y0, x1, y1, fill='grey')

        # Offset from a cell's top-left corner to its centre
        self.o = np.array([pixels / 2, pixels / 2])

        # Draw every obstacle listed in inx_obstracles; remember each one's
        # coordinates shrunk by 3 px so they match the agent's bounding box.
        self.coords_obstacles = []
        for i in range(inx_obstracles.shape[0]):
            obstacles_center = self.o + np.array([pixels * inx_obstracles[i, 0],
                                                  pixels * inx_obstracles[i, 1]])
            self.obstacle = self.canvas_widget.create_rectangle(
                obstacles_center[0] - 10, obstacles_center[1] - 10,  # top-left
                obstacles_center[0] + 10, obstacles_center[1] + 10,  # bottom-right
                outline='grey', fill='#00BFFF')
            obstacle_coords = self.canvas_widget.coords(self.obstacle)
            self.coords_obstacle = [obstacle_coords[0] + 3,
                                    obstacle_coords[1] + 3,
                                    obstacle_coords[2] - 3,
                                    obstacle_coords[3] - 3]
            self.coords_obstacles.append(self.coords_obstacle)

        # Geometric centre of every obstacle (for distance-based reward)
        self.coords_obstacle_center = [np.array([(c[0] + c[2]) / 2, (c[1] + c[3]) / 2])
                                       for c in self.coords_obstacles]

        # Fixed start / goal cells for the half map, shared by reset(),
        # step() and final().  (For the full 32x32 map the offsets used to
        # be (2, 25) for the agent and (25, 2) for the flag.)
        self.agent_start_center = self.o + np.array([pixels * 2, pixels * 14])
        self.flag_center = self.o + np.array([pixels * 12, pixels * 2])

        # Agent: pink oval at the start cell
        self.agent = self.canvas_widget.create_oval(
            self.agent_start_center[0] - 7, self.agent_start_center[1] - 7,
            self.agent_start_center[0] + 7, self.agent_start_center[1] + 7,
            outline='#FF1493', fill='#FF1493')

        # Flag: yellow rectangle at the target cell
        self.flag = self.canvas_widget.create_rectangle(
            self.flag_center[0] - 10, self.flag_center[1] - 10,  # top-left
            self.flag_center[0] + 10, self.flag_center[1] + 10,  # bottom-right
            outline='grey', fill='yellow')

        # Flag coordinates shrunk by 3 px so they compare equal to the
        # agent's bounding box when the agent reaches the goal cell
        flag_coords = self.canvas_widget.coords(self.flag)
        self.coords_flag = [flag_coords[0] + 3,
                            flag_coords[1] + 3,
                            flag_coords[2] - 3,
                            flag_coords[3] - 3]

        # Packing everything
        self.canvas_widget.pack()

    def reset(self):
        """Start a new episode: redraw the agent at the start cell.

        Returns the agent's canvas coordinates as the initial observation.
        """
        self.update()
        time.sleep(0.2)

        # Recreate the agent at its start position
        self.canvas_widget.delete(self.agent)
        # NOTE(review): uses plain 'red' while build_environment() uses
        # '#FF1493'; kept as-is in case the colour difference is intentional.
        self.agent = self.canvas_widget.create_oval(
            self.agent_start_center[0] - 7, self.agent_start_center[1] - 7,
            self.agent_start_center[0] + 7, self.agent_start_center[1] + 7,
            outline='red', fill='red')

        # Clear the current-route dictionary and its running key
        self.d = {}
        self.i = 0

        # Return observation
        return self.canvas_widget.coords(self.agent)

    def step(self, action):
        """Apply one action (0 = up, 1 = down, 2 = right, 3 = left).

        Returns (next_state, reward, done); next_state is the agent's new
        canvas coordinates, or one of the terminal sentinel strings
        'goal', 'obstacle', 'out of bonder' (sic -- kept byte-identical
        for compatibility with existing callers).
        """
        state = self.canvas_widget.coords(self.agent)
        base_action = np.array([0, 0])
        x1 = env_height * pixels
        y1 = env_height * pixels
        r1 = 0
        r2 = 0

        # Distances used by the shaped reward below
        dist_initial = np.sqrt(np.sum(np.square(self.flag_center - self.agent_start_center)))
        move_agent_center = np.array([(state[0] + state[2]) / 2,
                                      (state[1] + state[3]) / 2])
        dist_current = np.sqrt(np.sum(np.square(self.flag_center - move_agent_center)))

        # Translate the action into a pixel offset, clipped at the walls
        if action == 0:    # up
            if state[1] >= pixels:
                base_action[1] -= pixels
        elif action == 1:  # down
            if state[1] < (env_height - 1) * pixels:
                base_action[1] += pixels
        elif action == 2:  # right
            if state[0] < (env_width - 1) * pixels:
                base_action[0] += pixels
        elif action == 3:  # left
            if state[0] >= pixels:
                base_action[0] -= pixels

        # Moving the agent according to the action
        self.canvas_widget.move(self.agent, base_action[0], base_action[1])

        # Record the new position in the current-route dictionary
        self.d[self.i] = self.canvas_widget.coords(self.agent)

        # Updating next state and the distances it implies
        next_state = self.d[self.i]
        next_state_center = np.array([(next_state[0] + next_state[2]) / 2,
                                      (next_state[1] + next_state[3]) / 2])
        dist_next_state = np.sqrt(np.sum(np.square(self.flag_center - next_state_center)))
        d_obs_next_all = [np.sqrt(np.sum(np.square(next_state_center - c)))
                          for c in self.coords_obstacle_center]
        d_obs_next_small = min(d_obs_next_all)

        # Updating key for the dictionary
        self.i += 1

        # Reward = r1 + r2, where
        #   r1: attraction to the target / repulsion from obstacles,
        #   r2: penalty when the agent goes out of bounds.
        d_obs_thr = 60      # distance threshold to the nearest obstacle (px)
        dist_goal_thr = 60  # distance threshold to the goal (px)
        if next_state == self.coords_flag:
            time.sleep(0.1)
            r1 = 20
            r2 = 0
            done = True
            next_state = 'goal'
            print('Goal!')

            # First successful episode: remember it as the reference route
            if self.c:
                for j in range(len(self.d)):
                    self.f[j] = self.d[j]
                self.c = False
                self.longest = len(self.d)
                self.shortest = len(self.d)

            # Keep the shortest successful route found so far in self.f
            if len(self.d) < len(self.f):
                self.shortest = len(self.d)
                self.f = {}
                for j in range(len(self.d)):
                    self.f[j] = self.d[j]

            # Track the longest successful route as well
            if len(self.d) > self.longest:
                self.longest = len(self.d)

        elif next_state in self.coords_obstacles:
            r1 = -20
            r2 = 0
            done = True
            next_state = 'obstacle'

            # Abandon the current route
            self.d = {}
            self.i = 0

        elif (next_state_center[0] >= x1 - pixels / 2 or next_state_center[1] >= y1 - pixels / 2
                or next_state_center[0] <= pixels / 2 or next_state_center[1] <= pixels / 2):
            r2 = -30
            r1 = 0
            done = True
            next_state = 'out of bonder'
        else:
            r2 = 0
            if d_obs_next_small < d_obs_thr:
                r1 = -1
            elif dist_current < dist_initial and dist_next_state < dist_current:
                r1 = 0
            elif dist_current > dist_initial or dist_next_state > dist_current:
                r1 = -1
            elif dist_current < dist_goal_thr:
                r1 = 10

            done = False

        reward = r1 + r2

        # (A previously computed NextValidActions() result was unused and
        # has been removed; the method itself is kept for external callers.)
        return next_state, reward, done

    def NextValidActions(self, prev_action, next_state):
        """Return the actions considered valid from next_state.

        NOTE(review): the bounds test inside the loop does not depend on
        the candidate action, so actions 1-3 are either all kept or all
        dropped; only the reverse of prev_action is then removed.  Kept
        as-is because step() no longer consumes the result -- confirm the
        intended per-direction check before relying on this method.
        """
        available_actions = [0]
        n_moves = 4
        opposite_actions = {0: 1, 2: 3, 1: 0, 3: 2}  # 0 up, 1 down, 2 right, 3 left
        for action in range(1, n_moves):
            x1 = env_height * pixels
            y1 = env_height * pixels
            if (next_state[0] >= x1 - pixels / 2 or next_state[1] >= y1 - pixels / 2):  # out of bounds
                continue
            available_actions.append(action)

        if opposite_actions[prev_action] in available_actions:
            available_actions.remove(opposite_actions[prev_action])

        return available_actions

    def render(self):
        """Refresh the window so the agent's movement is visible."""
        time.sleep(0.05)  # slow the loop down enough to watch the agent move
        self.update()

    def final(self):
        """Draw the stored shortest route and export it via the global `a`."""
        # Deleting the agent at the end
        self.canvas_widget.delete(self.agent)

        # Showing the number of steps
        print('The shortest route:', self.shortest)
        print('The longest route:', self.longest)

        # Mark the start cell
        self.initial_point = self.canvas_widget.create_oval(
            self.agent_start_center[0] - 7, self.agent_start_center[1] - 7,
            self.agent_start_center[0] + 7, self.agent_start_center[1] + 7,
            fill='blue', outline='blue')

        # Draw each cell of the stored shortest route
        for j in range(len(self.f)):
            # Showing the coordinates of the final route
            print(self.f[j])
            self.track = self.canvas_widget.create_oval(
                self.f[j][0] - 3 + self.o[0] - 4, self.f[j][1] - 3 + self.o[1] - 4,
                self.f[j][0] - 3 + self.o[0] + 4, self.f[j][1] - 3 + self.o[1] + 4,
                fill='blue', outline='blue')
            # Export the route through the module-level dictionary
            a[j] = self.f[j]


def final_states():
    """Return the module-level dictionary `a` that final() fills with the
    coordinates of the final (shortest found) route."""
    return a
# Debug entry point: run and inspect the environment on its own,
# without running the full learning algorithm.
if __name__ == '__main__':
    env = Environment()
    env.mainloop()  # start the Tk event loop (blocks until the window closes)