import gym
import gym_env
import math
import cv2
import airsim
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSignal
import numpy as np
from gym import spaces
from configparser import ConfigParser
import torch
import queue
from .multirotor_airsim import MultirotorAirsim


class AirsimGymEnv(gym.Env, QtCore.QThread):
    """Gym-compatible AirSim multirotor environment with PyQt telemetry hooks.

    Wraps a :class:`MultirotorAirsim` dynamics model. Observations are a
    single row vector of ``cnn_feature_num`` max-pooled depth-image features
    concatenated with the model's normalized state features. The Qt signals
    below stream per-step training telemetry to an attached GUI, if any.
    """

    # Qt signals consumed by an external GUI (step index + payload arrays).
    action_signal = pyqtSignal(int, np.ndarray)
    state_signal = pyqtSignal(int, np.ndarray)
    attitude_signal = pyqtSignal(int, np.ndarray, np.ndarray)
    reward_signal = pyqtSignal(int, float, float)
    pose_signal = pyqtSignal(np.ndarray, np.ndarray, np.ndarray, np.ndarray)

    def __init__(self, cfg) -> None:
        super().__init__()
        print("init airsim-gym-env.")
        self.model = None
        self.data_path = None
        self.set_config(cfg)

    def set_config(self, cfg: ConfigParser):
        """Read every option from *cfg* and (re)build the dynamics model.

        Also initializes per-episode bookkeeping and the observation/action
        spaces. Safe to call again with a new config.
        """
        self.cfg = cfg
        self.env_name = cfg.get("options", "env_name")

        self.dynamic_model = MultirotorAirsim(self.cfg)

        goal_distance = self.cfg.getint("options", "goal_distance")

        self.dynamic_model.goal_distance = goal_distance

        self.dynamic_model.start_position = [0, 0, 50]

        # Allowed flight volume: a 100 m margin around the goal distance in
        # x/y, fixed altitude band in z. Leaving it terminates the episode.
        self.work_space_x = [-goal_distance-100, goal_distance+100]
        self.work_space_y = [-goal_distance-100, goal_distance+100]
        self.work_space_z = [0, 350]
        self.max_episode_steps = 10000

        self.client = self.dynamic_model.client

        self.state_feature_length = self.dynamic_model.state_feature_length

        self.cnn_feature_length = self.cfg.getint('options', 'cnn_feature_num')

        self.episode_num = 0
        self.total_step = 0
        self.step_num = 0
        self.cumulated_episode_reward = 0
        self.previous_distance_from_des_point = 0

        self.crash_distance = cfg.getint('environment', 'crash_distance')
        self.accept_radius = cfg.getint('environment', 'accept_radius')

        self.max_depth_meters = cfg.getint('environment', 'max_depth_meters')
        self.screen_height = cfg.getint('environment', 'screen_height')
        self.screen_width = cfg.getint('environment', 'screen_width')

        # Defensive defaults: these were previously only assigned in
        # get_obs()/reset(), so is_crashed()/compute_reward() could raise
        # AttributeError if called before the first observation/reset.
        self.min_distance_to_obstacles = self.max_depth_meters
        self.yaw_rate_queue = queue.Queue(10)

        self.trajectory_list = []

        self.observation_space = spaces.Box(low=0, high=1,
                                            shape=(1,
                                                   self.cnn_feature_length + self.state_feature_length),
                                            dtype=np.float32)

        self.action_space = self.dynamic_model.action_space

    def reset(self):
        """Reset the dynamics model and episode bookkeeping; return the first obs."""
        self.dynamic_model.reset()
        self.episode_num += 1
        self.step_num = 0
        self.cumulated_episode_reward = 0
        self.trajectory_list = []
        self.collision_info = self.client.simGetCollisionInfo()
        self.position = self.dynamic_model.get_position()
        self.previous_distance_from_des_point = self.dynamic_model.goal_distance
        obs = self.get_obs()
        # Sliding window of recent yaw-rate commands (see compute_reward).
        self.yaw_rate_queue = queue.Queue(10)
        return obs

    def step(self, action):
        """Apply *action*, advance the sim one step and return (obs, reward, done, info)."""
        self.dynamic_model.set_action(action)
        self.collision_info = self.client.simGetCollisionInfo()
        self.position = self.dynamic_model.get_position()
        self.trajectory_list.append(self.position)

        obs = self.get_obs()
        done, is_not_inside_workspace_now, has_reached_des_pose, too_close_to_obstable = self.is_done()
        info = {
            'is_success': has_reached_des_pose,
            'is_crash': too_close_to_obstable,
            'is_not_in_workspace': is_not_inside_workspace_now,
            'step_num': self.step_num
        }
        if done:
            print(info)

        reward = self.compute_reward(done, info, action)

        self.cumulated_episode_reward += reward

        self.print_train_info_airsim(action, obs, reward, info)

        self.set_pyqt_signal_multirotor(action, reward)

        self.step_num += 1
        self.total_step += 1
        return obs, reward, done, info

    def _request_depth_image(self, max_retries=5):
        """Fetch one DepthVis frame as a 2-D float array, retrying empty frames.

        AirSim occasionally returns a zero-width response. The previous code
        returned None in that case, which crashed downstream consumers of the
        observation; instead we retry, and after *max_retries* failures fall
        back to an all-ones (max-range) frame so the episode can continue.
        """
        for _ in range(max_retries):
            responses = self.client.simGetImages([
                airsim.ImageRequest("0", airsim.ImageType.DepthVis, True)
            ])
            if responses[0].width > 0:
                return airsim.list_to_2d_float_array(
                    responses[0].image_data_float,
                    responses[0].width,
                    responses[0].height)
        print("WARNING: empty depth frame after {} retries; "
              "substituting max-range image".format(max_retries))
        return np.ones((self.screen_height, self.screen_width),
                       dtype=np.float32)

    def get_obs(self):
        """Build the observation: pooled depth features + normalized state features.

        Returns a (1, cnn_feature_num + state_feature_length) float array.
        Also updates ``self.min_distance_to_obstacles`` used by is_crashed().
        """
        depth_image = self._request_depth_image()

        depth_image = depth_image[::-1, :]  # reverse y axis

        depth_image = cv2.resize(
            depth_image, (self.screen_width, self.screen_height)
        )

        # DepthVis is normalized; rescale to meters to measure obstacle range.
        depth_meter = depth_image * self.max_depth_meters
        self.min_distance_to_obstacles = depth_meter.min()

        image_scaled = np.clip(
            depth_meter, 0, self.max_depth_meters)/self.max_depth_meters*255

        # Invert so that *near* obstacles produce *large* feature values.
        image_scaled = 255-image_scaled
        image_uint8 = image_scaled.astype(np.uint8)

        split_row = 16
        split_col = 16  # ensure split_row*split_col = cnn_feature_num

        # Block max-pool the image into a split_row x split_col grid.
        # Vectorized equivalent of the row-band/column-band split loop:
        # the row-major ravel preserves the original feature ordering.
        h, w = image_uint8.shape
        pooled = image_uint8.reshape(
            split_row, h // split_row, split_col, w // split_col
        ).max(axis=(1, 3))

        img_feature = pooled.ravel()/255.0

        state_feature = self.dynamic_model._get_state_feature()/255

        feature_all = np.concatenate((img_feature, state_feature), axis=0)

        self.feature_all = feature_all

        feature_all = np.reshape(feature_all, (1, len(feature_all)))
        return feature_all

    def compute_reward(self, done, info, action):
        """Shaped reward: progress toward the goal minus action/heading costs.

        Terminal rewards: +10 on reaching the goal, -20 on crashing,
        -10 on leaving the workspace.
        """
        reward = 0
        reward_reach = 10
        reward_crash = -20
        reward_outside = -10

        if not done:
            # Track recent yaw-rate commands in a fixed-size window.
            # NOTE(review): yaw_std is computed but not used in the reward —
            # looks like an abandoned smoothness penalty; confirm intent.
            yaw_rate_reg = action[-1]
            if self.yaw_rate_queue.full():
                self.yaw_rate_queue.get()
            self.yaw_rate_queue.put(yaw_rate_reg)
            if self.yaw_rate_queue.qsize() > 1:
                yaw_std = np.std(np.gradient(list(self.yaw_rate_queue.queue)))
            else:
                yaw_std = 0

            # Progress made toward the goal this step, normalized by the
            # total goal distance and scaled up to dominate the step costs.
            distance_now = self.get_distance_to_goal_3d()
            reward_distance = (self.previous_distance_from_des_point -
                               distance_now)/self.dynamic_model.goal_distance*5000

            self.previous_distance_from_des_point = distance_now
            # Penalize large yaw-rate commands and heading error.
            action_cost = 0.1*abs(action[-1]) / \
                self.dynamic_model.yaw_rate_max_rad
            yaw_error = self.dynamic_model.state_raw[2]
            yaw_error_cost = 0.1*abs(yaw_error/180)
            reward = reward_distance - action_cost - yaw_error_cost

        elif info["is_success"]:
            reward = reward_reach
        elif info["is_crash"]:
            reward = reward_crash
        elif info["is_not_in_workspace"]:
            reward = reward_outside
        return reward

    def is_done(self):
        """Return (done, outside_workspace, reached_goal, crashed) flags."""
        is_not_inside_workspace_now = self.is_not_inside_workspace()
        has_reached_des_pose = self.is_in_desired_pose()
        too_close_to_obstable = self.is_crashed()

        # Episode ends when we leave the learning space, reach the goal,
        # crash, or exhaust the step budget.
        episode_done = is_not_inside_workspace_now or\
            has_reached_des_pose or\
            too_close_to_obstable or\
            self.step_num >= self.max_episode_steps

        return episode_done, is_not_inside_workspace_now, has_reached_des_pose, too_close_to_obstable

    def is_not_inside_workspace(self):
        """
        Check if the Drone is inside the Workspace defined
        """
        is_not_inside = False
        current_position = self.position

        if current_position[0] < self.work_space_x[0] or current_position[0] > self.work_space_x[1] or \
            current_position[1] < self.work_space_y[0] or current_position[1] > self.work_space_y[1] or \
                current_position[2] < self.work_space_z[0] or current_position[2] > self.work_space_z[1]:
            print(current_position, self.work_space_x,
                  self.work_space_y, self.work_space_z)
            is_not_inside = True

        return is_not_inside

    def is_in_desired_pose(self):
        """Return True when the drone is within accept_radius of the goal."""
        return self.get_distance_to_goal_3d() < self.accept_radius

    def is_crashed(self):
        """Return True on a reported collision or when an obstacle is too close."""
        is_crashed = False
        collision_info = self.collision_info
        if collision_info.has_collided or self.min_distance_to_obstacles < self.crash_distance:
            is_crashed = True

        return is_crashed

    def get_distance_to_goal_3d(self):
        """Euclidean distance from the current position to the goal position."""
        goal_pose = self.dynamic_model.goal_position
        relative_pose_x = self.position[0] - goal_pose[0]
        relative_pose_y = self.position[1] - goal_pose[1]
        relative_pose_z = self.position[2] - goal_pose[2]

        return math.sqrt(pow(relative_pose_x, 2) + pow(relative_pose_y, 2) + pow(relative_pose_z, 2))

    def print_train_info_airsim(self, action, obs, reward, info):
        """Mirror per-step training diagnostics into the AirSim HUD log."""
        msg_train_info = "EP: {} Step: {} Total_step: {}".format(
            self.episode_num, self.step_num, self.total_step)
        self.client.simPrintLogMessage('Train: ', msg_train_info)
        self.client.simPrintLogMessage('Action: ', str(action))
        self.client.simPrintLogMessage('reward: ', "{:4.4f} total: {:4.4f}".format(
            reward, self.cumulated_episode_reward))
        self.client.simPrintLogMessage('Info: ', str(info))
        self.client.simPrintLogMessage(
            'Feature_norm: ', str(self.dynamic_model.state_norm))
        self.client.simPrintLogMessage(
            'Feature_raw: ', str(self.dynamic_model.state_raw))
        self.client.simPrintLogMessage(
            'Min_depth: ', str(self.min_distance_to_obstacles))

    def set_pyqt_signal_multirotor(self, action, reward):
        """Emit per-step telemetry (action, state, attitude, reward, pose) to the GUI."""
        step = int(self.total_step)

        # transfer 2D state and action to 3D
        state = self.dynamic_model.state_raw
        action_output = action
        state_output = state
        self.action_signal.emit(step, action_output)
        self.state_signal.emit(step, state_output)

        # other values
        self.attitude_signal.emit(step, np.asarray(self.dynamic_model.get_attitude(
        )), np.asarray(self.dynamic_model.get_attitude_cmd()))
        self.reward_signal.emit(step, reward, self.cumulated_episode_reward)
        self.pose_signal.emit(np.asarray(self.dynamic_model.goal_position), np.asarray(
            self.dynamic_model.start_position), np.asarray(self.dynamic_model.get_position()), np.asarray(self.trajectory_list))