from operator import is_not
from importlib_metadata import re
import numpy as np
import cv2
import os
import sys
import time
import math
import argparse
import logging
import logging.handlers
import random
import datetime
import json
import requests
import base64
import urllib
import copy
import torch

from lidar_scan import *

import utils
import TD3
from logger import Logger

# generate random obstacles
def generate_obstacles():
    """Place five round obstacles roughly on a ring of radius 200 around
    the canvas center (400, 400), each jittered by up to 20 px and given
    a random radius in [10, 20).

    Returns:
        (xs, ys, rs): three parallel lists of obstacle x, y and radius.
    """
    xs, ys, rs = [], [], []

    # random starting orientation of the ring, in [0, 2*pi)
    base_angle = np.random.uniform(0, 2*3.1415)

    jitter = 20

    for k in range(5):
        theta = base_angle + k * (3.1415*2.0/5.0)
        # ideal ring position, recentered on the 800x800 canvas
        cx = 400 + int(math.cos(theta) * 200)
        cy = 400 + int(math.sin(theta) * 200)
        # random jitter so the layout differs between episodes
        cx = cx + np.random.randint(-jitter, jitter)
        cy = cy + np.random.randint(-jitter, jitter)

        radius = np.random.randint(10, 20)

        xs.append(cx)
        ys.append(cy)
        rs.append(radius)

    return xs, ys, rs

# generate lidar observation
def get_lidar_observation(canvas, pos_x, pos_y, max_range=800):
    """Simulate a 360-beam lidar scan by ray-marching over the canvas.

    One ray is cast per degree from (pos_x, pos_y); each ray is stepped one
    pixel at a time until it leaves the canvas or hits an obstacle pixel
    (pure red in BGR order: (0, 0, 255)).

    Args:
        canvas: HxWx3 BGR image; obstacles must be drawn in pure red.
        pos_x, pos_y: sensor position in pixel coordinates.
        max_range: maximum ray length in pixels (default 800, matching the
            previously hard-coded canvas size).

    Returns:
        list of 360 floats: distance in pixels to the first hit per beam,
        or -1 when the ray leaves the canvas without hitting anything.
    """
    height, width = canvas.shape[:2]

    laser_scan = []
    for i in range(360):
        angle = i * math.pi / 180.0
        # unit direction of the ray
        dx = math.cos(angle)
        dy = math.sin(angle)
        distance = -1
        for j in range(max_range):
            ox = pos_x + dx * j
            oy = pos_y + dy * j
            # stop when the ray leaves the canvas
            if ox < 0 or oy < 0 or ox >= width or oy >= height:
                break
            color = canvas[int(oy), int(ox)]
            # obstacle pixels are pure red (BGR == (0, 0, 255))
            if color[0] == 0 and color[1] == 0 and color[2] == 255:
                # (dx, dy) is a unit vector, so j steps == j pixels
                distance = float(j)
                break
        laser_scan.append(distance)

    return laser_scan

def get_lidar_observation_fast(canvas,pos_x,pos_y):
    """Faster lidar simulation via a polar transform of the obstacle canvas.

    Works on a half-resolution copy of `canvas`: warps it to polar
    coordinates around the sensor, runs `front_seg` (from lidar_scan --
    presumably keeps only the first obstacle surface along each ray; TODO
    confirm), warps back to cartesian, then converts the resulting image to
    360 per-degree ranges with `lidar_to_scan`. Ranges are doubled at the
    end to undo the half-scale resize.

    Args:
        canvas: BGR obstacle image (maxRadius=800 assumes an 800x800 canvas).
        pos_x, pos_y: sensor position in full-resolution pixel coordinates.

    Returns:
        (canvas_clone_polar_inv, laser_scan): the segmented cartesian image
        (half resolution) and a float32 array of 360 distances expressed in
        full-resolution pixels.
    """

    maxRadius = 800

    # scale canvas to half size so the warp and scan touch 1/4 of the pixels
    canvas_scale = cv2.resize(canvas, (0,0), fx=0.5, fy=0.5)
    # sensor position in half-resolution coordinates
    center = (pos_x*0.5,pos_y*0.5) 

    # convert canvas to gray scale (warp/segmentation work on 1 channel)
    canvas_gray = cv2.cvtColor(canvas_scale, cv2.COLOR_BGR2GRAY)

    # perform polar transform around the sensor position
    # NOTE(review): warpPolar's dsize expects (width, height) but is given
    # (shape[0], shape[1]) == (rows, cols); harmless only while the canvas
    # is square -- confirm before using non-square canvases.
    canvas_clone_polar= cv2.warpPolar(canvas_gray,center=center,maxRadius=maxRadius,dsize=(canvas_scale.shape[0],canvas_scale.shape[1]),flags=cv2.WARP_POLAR_LINEAR)

    # output buffer for the segmented polar image, same size as the input
    canvas_polar = np.zeros((canvas_scale.shape[0],canvas_scale.shape[1]), np.uint8)

    # keep only the first (nearest) obstacle response per polar row
    front_seg(canvas_clone_polar,canvas_polar)

    # inverse warp back to cartesian coordinates
    canvas_clone_polar_inv= cv2.warpPolar(canvas_polar,center=center,maxRadius=maxRadius,dsize=(canvas_scale.shape[0],canvas_scale.shape[1]),flags=cv2.WARP_INVERSE_MAP + cv2.WARP_POLAR_LINEAR)
    
    # black border so rays terminate cleanly at the canvas edge
    cv2.rectangle(canvas_clone_polar_inv,(0,0),(canvas_scale.shape[0],canvas_scale.shape[1]),(0,0,0),5)

    # convert lidar image to 0-360 scan data
    # one float range reading per degree
    laser_scan = np.zeros(360, np.float32)

    # optional inflation of the segmented image (disabled)
    # canvas_clone_polar_inv_inflated = cv2.dilate(canvas_clone_polar_inv,kernel=np.ones((5,5),np.uint8))

    # external helper: rasterized image -> 360 ranges around the sensor
    lidar_to_scan(canvas_clone_polar_inv,laser_scan,pos_x*0.5,pos_y*0.5)

    # undo the 0.5x resize so distances are in full-resolution pixels
    laser_scan = laser_scan * 2
    
    return canvas_clone_polar_inv,laser_scan

# convert laser_scan to 2D points
def laser_scan_to_points(laser_scan, pos_x, pos_y):
    """Project per-degree range readings back into 2D canvas points.

    Beam i is assumed to point at i degrees. A non-positive reading is
    treated as "no return" and mapped to the sensor position itself.

    Returns:
        list of [x, y] pairs, one per beam.
    """
    points = []
    for idx, rng in enumerate(laser_scan):
        if rng > 0:
            theta = idx * 3.1415 / 180.0
            points.append([pos_x + math.cos(theta) * rng,
                           pos_y + math.sin(theta) * rng])
        else:
            # no return on this beam: collapse the point onto the sensor
            points.append([pos_x, pos_y])
    return points

# class of robot
class Robot:
    """Disc-shaped robot on an 800x800 canvas driven by per-step commands.

    NOTE: despite the parameter names, step() treats `angle` and `velocity`
    as x and y velocity components (pixels/second), not heading and speed.
    """

    def __init__(self, pos_x=400, pos_y=400):
        self.steps = 0                 # steps taken since last reset
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.old_pos_x = pos_x         # position before the latest move
        self.old_pos_y = pos_y
        self.angle = 0
        self.velocity = 0
        self.max_velocity = 100
        self.duration = 0.01           # duration of each step in seconds
        self.radius = 90               # radius of robot in pixels

    # reset
    def reset(self):
        """Return the robot to the canvas center with fresh state.

        Delegates to __init__ so the attribute list exists in one place.
        """
        self.__init__(400, 400)

    def get_pos(self):
        """Return the current (x, y) position."""
        return self.pos_x, self.pos_y

    def set_pos(self, pos_x, pos_y):
        """Teleport the robot to (pos_x, pos_y) without collision checks."""
        self.pos_x = pos_x
        self.pos_y = pos_y

    # collision detection
    def collision_detection(self, pos_x, pos_y, obstacle_x, obstacle_y, obstacle_r):
        """Test a candidate position against every circular obstacle.

        Args:
            pos_x, pos_y: candidate robot center.
            obstacle_x, obstacle_y, obstacle_r: parallel lists of obstacle
                centers and radii.

        Returns:
            (col, min_distance): col is True when the robot disc overlaps
            any obstacle; min_distance is the smallest absolute rim-to-rim
            gap seen (abs() so a position just inside an obstacle still
            reads as "near the boundary").
        """
        col = False
        min_distance = 10000000
        for i in range(len(obstacle_x)):
            center_dist = math.hypot(pos_x - obstacle_x[i], pos_y - obstacle_y[i])
            gap = abs(center_dist - (obstacle_r[i] + self.radius))
            if gap < min_distance:
                min_distance = gap

            # overlapping discs -> collision; later obstacles don't matter
            if center_dist < obstacle_r[i] + self.radius:
                col = True
                break

        return col, min_distance

    def step(self, angle, velocity, obstacle_x, obstacle_y, obstacle_r):
        """Advance one time step with an (x, y) velocity command.

        The move is only committed when the candidate position is
        collision-free; otherwise the robot stays put and is penalized.

        Returns:
            (reward, success): reward = base penalty (-1 inside the
            obstacle polygon, 0 outside) - 1 on collision - 1/(gap+1)
            proximity term; success is True when the robot ends up outside
            the polygon spanned by the obstacle centers.
        """
        success_ = False
        reward = -1

        self.steps += 1

        self.angle = angle

        # candidate position after applying the velocity command
        pos_x = angle * self.duration + self.pos_x
        pos_y = velocity * self.duration + self.pos_y

        # collision detection; only commit the move when it is safe
        collision_, min_distance = self.collision_detection(
            pos_x, pos_y, obstacle_x, obstacle_y, obstacle_r)
        if not collision_:
            self.pos_x = pos_x
            self.pos_y = pos_y

        # build the polygon of obstacle centers and test whether the robot
        # sits inside it (pointPolygonTest > 0 means inside)
        contour = np.zeros((len(obstacle_x), 1, 2), np.int32)
        for i in range(len(obstacle_x)):
            contour[i][0][0] = obstacle_x[i]
            contour[i][0][1] = obstacle_y[i]
        if cv2.pointPolygonTest(contour, (self.pos_x, self.pos_y), True) > 0:
            reward = -1
        else:
            reward = 0
            success_ = True

        # hard punishment for collision
        if collision_:
            reward -= 1
        # soft punishment for being close to an obstacle rim
        reward += -1.0 / (min_distance + 1)

        self.old_pos_x = self.pos_x
        self.old_pos_y = self.pos_y

        return reward, success_

# capture cursor position
cursor_x, cursor_y = 0, 0

def on_mouse_move(event, x, y, flags, param):
    """OpenCV mouse callback: mirror the cursor into module globals."""
    global cursor_x, cursor_y
    cursor_x, cursor_y = x, y

# class environment
class Environment:
    """2D navigation episode: one Robot among 5 circular obstacles.

    The observation `obs` is a flat float32 vector with 3 values per
    obstacle, robot-relative: (dx, dy, obstacle_r - robot_radius).
    """

    def __init__(self):
        self.canvas = np.zeros((800, 800, 3), np.uint8)
        self.canvas_clone = self.canvas.copy()
        self.obstacle_x = []
        self.obstacle_y = []
        self.obstacle_r = []
        self.obstacle_num = 5
        self.max_timesteps = 1e9
        self.episode_steps = 200            # step budget per episode
        self.current_step = 0

        self.R = Robot(400, 400)

        self.laser_scan = np.zeros(360, np.float32)
        self.obs = np.zeros(15, np.float32)  # 3 features x 5 obstacles
        self.reward = -1
        self.success = False

        # create obstacles
        self.obstacle_x, self.obstacle_y, self.obstacle_r = generate_obstacles()

    # observe
    def observe(self):
        """Redraw the obstacle canvas and refresh the observation vector."""
        canvas_clone = self.canvas.copy()

        # draw obstacles as white filled circles
        for i in range(len(self.obstacle_x)):
            cv2.circle(canvas_clone,
                       (self.obstacle_x[i], self.obstacle_y[i]),
                       self.obstacle_r[i], (255, 255, 255), -1)

        # lidar observation path is currently disabled:
        # _,self.laser_scan = get_lidar_observation_fast(canvas_clone,self.R.pos_x,self.R.pos_y)

        self.canvas_clone = canvas_clone.copy()

        # 3 robot-relative features per obstacle.
        # BUGFIX: this previously wrote obs[i], obs[i+1], obs[i+2], so
        # consecutive obstacles overwrote each other's entries and slots
        # 7..14 of the 15-element vector were never filled; obs[3*i + k]
        # is the intended layout.
        for i in range(len(self.obstacle_x)):
            self.obs[3*i] = self.obstacle_x[i] - self.R.pos_x
            self.obs[3*i + 1] = self.obstacle_y[i] - self.R.pos_y
            self.obs[3*i + 2] = self.obstacle_r[i] - self.R.radius

    # actuate
    def step(self, angle, velocity):
        """Apply a normalized action in [-1, 1]^2 and advance one step.

        Returns:
            (obs, reward, done, success)
        """
        self.current_step += 1

        # scale normalized actions to pixel/second velocity commands
        angle *= 100
        velocity *= 100
        # move robot
        self.reward, self.success = self.R.step(
            angle, velocity, self.obstacle_x, self.obstacle_y, self.obstacle_r)

        # observe
        self.observe()

        # episode ends on success or when the step budget is exhausted
        done = self.current_step >= self.episode_steps or self.success

        return self.obs, self.reward, done, self.success

    # reset
    def reset(self):
        """Start a new episode with freshly generated obstacles."""
        self.reward = -1
        self.success = False   # previously left stale across episodes
        self.current_step = 0
        self.R.reset()
        # create obstacles
        self.obstacle_x, self.obstacle_y, self.obstacle_r = generate_obstacles()
        self.observe()
        return self.obs

    # render
    def render(self):
        """Draw the scene for display and return the BGR canvas."""
        # NOTE: only obstacle[0] is redrawn here; observe() already drew
        # all obstacles onto canvas_clone.
        for i in range(1):
            cv2.circle(self.canvas_clone,
                       (self.obstacle_x[i], self.obstacle_y[i]),
                       self.obstacle_r[i], (255, 255, 255), -1)
        # robot disc: green branch requires reward == 1, which the current
        # reward scheme never produces, so the robot renders red
        if self.reward == 1:
            cv2.circle(self.canvas_clone,
                       (int(self.R.pos_x), int(self.R.pos_y)),
                       self.R.radius, (0, 255, 0), -1)
        else:
            cv2.circle(self.canvas_clone,
                       (int(self.R.pos_x), int(self.R.pos_y)),
                       self.R.radius, (0, 0, 255), -1)

        return self.canvas_clone

# evaluate the performance of the agent
def evaluate(agent, env, num_episodes=50):
    """Run `num_episodes` greedy episodes and report the success rate.

    Args:
        agent: policy exposing select_action(state) -> 2-element action.
        env: Environment instance (reset() / step() interface).
        num_episodes: number of evaluation episodes.

    Returns:
        (success_rate, mean_episode_reward): success rate in [0, 1] and
        per-episode average of the summed step rewards.
    """
    env.reset()
    episode_reward = 0
    success = 0.0

    for k in range(num_episodes):
        state = env.reset()
        done = False
        while not done:
            action = agent.select_action(state)
            state, reward, done, success_ = env.step(action[0], action[1])
            # success ends the episode, so this fires at most once per episode
            if success_:
                success += 1.0
            episode_reward += reward

    success /= num_episodes
    episode_reward /= num_episodes
    print("---------------------------------------")
    print("success rate: ", success)
    print("---------------------------------------")

    return success, episode_reward

def main():
    """Script entry point.

    Builds the Environment, loads a pre-trained TD3 agent from ./models,
    then runs a windowed evaluation loop followed by a TD3 training loop.

    NOTE(review): the "test agent" loop below runs for 100_000_000 episodes
    and ESC only breaks the inner while, so the training code after it is
    effectively unreachable in a normal run -- confirm this is intended.
    """

    env = Environment()

    # experiment logger (tensorboard-backed); also consumed by policy.train
    L = Logger("./", use_tb=True)
    
    parser = argparse.ArgumentParser()
    parser.add_argument("--policy", default="TD3")                  # Policy name (TD3, DDPG or OurDDPG)
    parser.add_argument("--env", default="FetchReach-v1")          # OpenAI gym environment name
    parser.add_argument("--seed", default=0, type=int)              # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--start_timesteps", default=1e3, type=int)# Time steps initial random policy is used
    parser.add_argument("--eval_freq", default=1e3, type=int)       # How often (time steps) we evaluate
    parser.add_argument("--max_timesteps", default=1e8, type=int)   # Max time steps to run environment
    parser.add_argument("--expl_noise", default=0.1)                # Std of Gaussian exploration noise
    parser.add_argument("--batch_size", default=1024, type=int)      # Batch size for both actor and critic
    parser.add_argument("--discount", default=0.99)                 # Discount factor
    parser.add_argument("--tau", default=0.005)                     # Target network update rate
    parser.add_argument("--policy_noise", default=0.2)              # Noise added to target policy during critic update
    parser.add_argument("--noise_clip", default=0.5)                # Range to clip target policy noise
    parser.add_argument("--policy_freq", default=2, type=int)       # Frequency of delayed policy updates
    parser.add_argument("--save_model", action="store_true")        # Save model and optimizer parameters
    parser.add_argument("--load_model", default="")                 # Model load file name, "" doesn't load, "default" uses file_name
    args = parser.parse_args()
    # NOTE(review): --policy, --env and --load_model are parsed but never used.

    if not os.path.exists("./results"):
        os.makedirs("./results")
    if args.save_model and not os.path.exists("./models"):
        os.makedirs("./models")

    # init state and action
    action = np.zeros(2)
    state = np.zeros(15, np.float32)
    next_state = np.zeros(15, np.float32)
    state_dim = 15      # 3 features x 5 obstacles (see Environment.obs)
    action_dim = 2
    max_action = 1.0

    # seed torch and numpy for reproducibility
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": args.discount,
        "tau": args.tau,
    }

    # Initialize TD3
    # Target policy smoothing is scaled wrt the action scale
    kwargs["policy_noise"] = args.policy_noise * max_action
    kwargs["noise_clip"] = args.noise_clip * max_action
    kwargs["policy_freq"] = args.policy_freq
    policy = TD3.TD3(**kwargs)
    # load model
    # NOTE(review): checkpoints are loaded unconditionally; this raises if
    # ./models/actor.pth or ./models/critic.pth does not exist -- confirm.
    policy.actor.load("./models/actor" + ".pth")
    policy.critic.load("./models/critic" + ".pth")
    print("TD3 agent initialized")

    replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
    print("ReplayBuffer initialized")

    # test agent: run greedy episodes forever and render each step
    t_succ = 0
    for t in range(100000000):
        state = env.reset()
        done = False
        while not done:
            action = policy.select_action(state)
            state,reward,done,success = env.step(action[0],action[1])
            if success == True:
                t_succ += 1
                print("success: ",t,"/",t_succ)
            # render
            canvas_clone = env.render()
            # draw the action by arrow from robot R
            cv2.arrowedLine(canvas_clone, (int(env.R.pos_x), int(env.R.pos_y)), (int(env.R.pos_x + 100*action[0]), int(env.R.pos_y + 100*action[1])), (0, 255, 255), 1)
            cv2.imshow('canvas', canvas_clone)
            # ESC breaks only this inner while; the outer for loop keeps
            # starting new episodes
            k = cv2.waitKey(1)
            if k == 27:
                break

    # ---- TD3 training loop (see docstring: effectively unreachable) ----
    state = env.reset()
    next_state = state
    episode_reward = -1
    episode_timesteps = 0
    episode_num = 0
    train_counter = 0

    for t in range(int(args.max_timesteps)):

        episode_timesteps += 1
        train_counter += 1

        # print(t / args.start_timesteps)

        # Select action randomly or according to policy
        if t < args.start_timesteps:
            # uniform random action during the initial exploration phase
            action = np.random.uniform(-max_action, max_action, size=action_dim)
        else:
            # policy action plus Gaussian exploration noise, clipped to bounds
            action = (
                policy.select_action(np.array(state))
                + np.random.normal(0, max_action * args.expl_noise, size=action_dim)
            ).clip(-max_action, max_action)

        # Perform action
        next_state,reward,done,success = env.step(action[0],action[1])
        # don't treat a time-limit truncation as a true terminal state
        done_bool = float(done) if episode_timesteps < env.episode_steps else 0

        # Store data in replay buffer
        replay_buffer.add(state, action, next_state, reward, done_bool)
        state = next_state.copy()
        episode_reward += reward

        # Train agent after collecting sufficient data
        if t >= args.start_timesteps :
            # one gradient step per environment step
            for nu in range(1):
                policy.train(L, replay_buffer, args.batch_size)

        if done:
            L.log('eval/episode_reward', episode_reward, t)
            # Reset environment
            state = env.reset()
            done = False
            episode_reward = -1
            episode_timesteps = 0
            episode_num += 1 

        # Evaluate episode
        if (t + 1) % args.eval_freq == 0:
            sr1,rew1 = evaluate(policy, env)
            L.log('eval/success_rate1', sr1, t)
            L.log('eval/reward1', rew1, t)
            # checkpoint saving is currently disabled
            # policy.actor.save("./models/actor" + ".pth")
            # policy.critic.save("./models/critic" + ".pth")
    
if __name__ == '__main__':
    main()

