import math
import json
import subprocess

import numpy as np
import requests

from mozi_utils import geo
from my_envs import etc
from my_envs.agent import Agent, EscapeAgent
from utils.feed_forward_net import SingleLayerFeedForward

from math import radians
import random
import math
from utils.email import send_info_from_email
import heapq


def get_random_sign():
    """Return +1 or -1 with equal probability.

    The previous implementation mapped ``randint(-1, 1)`` (three outcomes)
    through ``x >= 0``, which returned +1 two times out of three; a "random
    sign" should be unbiased.
    """
    return random.choice((1, -1))


class Location:
    """Per-agent spawn data for one scenario, stored as index-aligned
    parallel lists (entry ``k`` of every list describes agent ``k``)."""

    def __init__(self):
        # x/y offsets, initial speeds and initial headings, one per agent
        self.xs, self.ys = [], []
        self.speeds, self.headings = [], []

    def add(self, x, y, speed, heading):
        """Append one agent's spawn position, speed and heading."""
        for seq, value in zip(
                (self.xs, self.ys, self.speeds, self.headings),
                (x, y, speed, heading)):
            seq.append(value)


class Locations:
    """Bounded replay buffer of the hardest (lowest-reward) start locations.

    Entries are kept in a min-heap so :meth:`getLocation` pops the scenario
    with the smallest reward first.  When the buffer is full, a new entry is
    only accepted if its reward is lower than the current maximum, which is
    then evicted.
    """

    def __init__(self):
        self.priority_queue = []
        self.max_num = 50
        self.min_distance = etc.pursuit_distance
        # Monotonically increasing tie-breaker: without it, two entries with
        # equal rewards make heapq compare Location objects, which have no
        # ordering and raise TypeError.
        self._push_count = 0

    def push(self, reward, location):
        """Insert *location*; when full, keep only the lowest-reward entries."""
        entry = (reward, self._push_count, location)
        self._push_count += 1
        if len(self.priority_queue) >= self.max_num:
            # A min-heap only guarantees its SMALLEST element is at index 0;
            # the old code read priority_queue[-1] as "the maximum" and used
            # list.pop() to evict it, which removed an arbitrary element.
            # Find the true maximum-reward entry and replace it instead.
            worst = max(range(len(self.priority_queue)),
                        key=lambda k: self.priority_queue[k][0])
            if reward < self.priority_queue[worst][0]:
                self.priority_queue[worst] = entry
                heapq.heapify(self.priority_queue)
        else:
            heapq.heappush(self.priority_queue, entry)

    def getLocation(self):
        """Pop and return the stored location with the smallest reward."""
        return heapq.heappop(self.priority_queue)[-1]

    def isNull(self):
        """Return True when the buffer holds no locations."""
        return len(self.priority_queue) == 0

    def get_random_location(self):
        """Sample a fresh random spawn location for every agent.

        Each agent is placed on a random bearing at a random distance in
        ``[min_distance, etc.boundary_distance]`` with a random speed/heading.
        """
        loc = Location()
        for _ in range(etc.agent_num):
            d = random.randint(self.min_distance, etc.boundary_distance)
            degree = random.randint(0, 360)
            x = d * math.cos(math.radians(degree))
            y = d * math.sin(math.radians(degree))
            ship_speed = random.randint(0, 5)
            ship_heading = random.randint(0, 360)
            loc.add(x, y, ship_speed, ship_heading)
        return loc


class BaseEnv:
    """Multi-agent pursuit environment.

    ``agent_num`` pursuing USVs try to encircle a single escaping agent: each
    pursuer must close to the capture distance while the pursuers' bearings
    around the target stay evenly spaced (``360 / agent_num`` degrees apart).
    """

    # number of pursuing agents
    agent_num = 4

    # angular threshold for the encirclement
    max_degree = 360 / agent_num

    # radius of the encirclement circle
    max_distance = 200.0

    # the set of pursuing agents
    pursuit_agents = []

    # the escaping agent
    escape_agent: EscapeAgent

    # per-agent rewards
    rewards = []

    # mean remaining pursuit distance
    mean_distance = 0.0

    # feed-forward neural network
    feed_forward_net: SingleLayerFeedForward

    # helper member: variance of the remaining pursuit distances
    var: float

    # Pairwise observation state between agents:
    #   1. distance between two USVs
    #   2. relative bearing between USVs
    #   3. difference of the remaining pursuit distances
    #   4. pursuit angle between USVs (also tied to the global state)
    pursuit_degree = []

    # pursuit-angle difference between two adjacent USVs
    pursuit_degree_diff = []

    def __init__(self, agent_num=etc.agent_num, max_distance=etc.pursuit_distance):
        if etc.use_state_network:
            self.obs_dim = etc.complex_obs_dim
        else:
            self.obs_dim = etc.simple_obs_dim
        self.action_dim = etc.action_dim
        self.agent_num = agent_num
        self.expected_degree = 360 / self.agent_num

        # curriculum: start farther out than the capture distance and shrink
        self.max_distance = etc.start_distance_times * etc.pursuit_distance
        self.decrease_distance = self.max_distance - max_distance
        self.decrease_times = etc.start_distance_times - 1

        self.rewards = np.zeros(self.agent_num, dtype=float)

        self.scenairo_rewards = np.zeros(self.agent_num, dtype=float)
        self.pursuit_degree_diff = np.zeros(self.agent_num, dtype=float)
        self.pursuit_agents = np.empty(self.agent_num, dtype=Agent)
        self.neighbor_pursuit_degree = np.zeros(self.agent_num, dtype=float)
        self.abs_neighbor_pursuit_degree = np.zeros(self.agent_num, dtype=float)

        # straight-line distance between USVs
        self.distance_between_usv = np.zeros((self.agent_num, self.agent_num), dtype=float)
        # difference of remaining pursuit distances
        self.pursuit_distance_diff = np.zeros((self.agent_num, self.agent_num), dtype=float)
        # pursuit angle
        self.pursuit_degree = np.zeros((self.agent_num, self.agent_num), dtype=float)
        # relative bearing between USVs
        self.bearing_between_usv = np.zeros((self.agent_num, self.agent_num), dtype=float)
        # server-side initialisation is done at construction time
        self.testMode = etc.testMode
        self.step_num = 0
        self.scenairo_num = 0
        self.scenairo_step_num = 0
        self.eval_mode = False
        self.escape_speed = 0
        self.escape_agent = None
        self.num_success = 0
        self.total_num_success = 0
        self.degree_success = False
        self.success = False
        self.max_scenairo_reward = 0
        self.locs = Locations()
        self.loc = None

    def reset_agent_num(self, new_agent_num):
        """Shrink the environment to *new_agent_num* pursuers (no-op if larger)."""
        if self.agent_num > new_agent_num:
            self.agent_num = new_agent_num
            self.rewards = self.rewards[0:self.agent_num]
            self.scenairo_rewards = self.scenairo_rewards[0:self.agent_num]
            self.pursuit_degree_diff = self.pursuit_degree_diff[0:self.agent_num]
            self.pursuit_agents = self.pursuit_agents[0:self.agent_num]
            self.neighbor_pursuit_degree = self.neighbor_pursuit_degree[0:self.agent_num]
            # was sliced from neighbor_pursuit_degree (copy/paste bug), which
            # silently aliased the two arrays after a resize
            self.abs_neighbor_pursuit_degree = self.abs_neighbor_pursuit_degree[0:self.agent_num]
            self.distance_between_usv = self.distance_between_usv[0:self.agent_num]
            self.pursuit_distance_diff = self.pursuit_distance_diff[0:self.agent_num]
            self.pursuit_degree = self.pursuit_degree[0:self.agent_num]
            self.bearing_between_usv = self.bearing_between_usv[0:self.agent_num]
            self.expected_degree = 360 / self.agent_num
        return self.update_state()

    def succeed(self):
        """Check capture: every pursuer closed its distance AND every adjacent
        bearing gap is within ``etc.degree_error`` of the ideal spacing.

        Also maintains the success counters / streak bookkeeping.
        """
        distance_num = 0
        degree_num = 0
        for i in range(self.agent_num):
            if self.pursuit_agents[i].distance_remaining <= 0:
                distance_num += 1
            if self.expected_degree - etc.degree_error <= self.abs_neighbor_pursuit_degree[i] \
                    <= self.expected_degree + etc.degree_error:
                degree_num += 1
        self.degree_success = degree_num == self.agent_num
        success = distance_num == self.agent_num and self.degree_success
        if success:
            self.num_success += 1
            self.total_num_success += 1
        elif self.success:
            # the streak just ended
            self.num_success = 0
        self.success = success
        return success

    def is_done(self):
        """A scenario ends after ``etc.scenairo_max_num`` steps."""
        return self.scenairo_step_num >= etc.scenairo_max_num

    def set_eval_mode(self):
        """Switch to evaluation: full escape speed, harder start distances."""
        self.eval_mode = True
        self.escape_speed = etc.max_escape_speed
        self.locs.min_distance = etc.boundary_distance / 2
        if self.max_distance >= etc.pursuit_distance:
            self.max_distance = self.max_distance - self.decrease_distance

    def update_observation_state(self):
        """Recompute all pairwise/global observation quantities and return
        the per-agent observation list."""
        d = []
        degree_list = []
        for i in range(self.agent_num):
            degree_list.append([i, self.pursuit_agents[i].degree_to_goal])
        # agents ordered by bearing around the escape agent
        self.sorted_degree_list = sorted(degree_list, key=lambda x: x[1])
        for i in range(self.agent_num):
            curr = self.sorted_degree_list[i][0]
            last = (i - 1 + self.agent_num) % self.agent_num
            self.neighbor_pursuit_degree[curr] = (self.sorted_degree_list[i][1] - self.sorted_degree_list[last][1] + 360) % 360
            if self.neighbor_pursuit_degree[curr] > 180:
                self.abs_neighbor_pursuit_degree[curr] = 360 - self.neighbor_pursuit_degree[curr]
            else:
                self.abs_neighbor_pursuit_degree[curr] = self.neighbor_pursuit_degree[curr]
        for i in range(self.agent_num):
            curr = self.sorted_degree_list[i][0]
            nxt = self.sorted_degree_list[(i + 1) % self.agent_num][0]
            self.pursuit_degree_diff[curr] = self.neighbor_pursuit_degree[curr] - self.neighbor_pursuit_degree[nxt]

        for i in range(self.agent_num):
            A = self.pursuit_agents[i]
            for j in range(self.agent_num):
                if i == j:
                    continue
                B = self.pursuit_agents[j]
                self.pursuit_distance_diff[i][j] = A.distance_remaining - B.distance_remaining
                self.pursuit_degree[i][j] = (A.degree_to_goal - B.degree_to_goal + 360) % 360
                self.distance_between_usv[i][j] = geo.get_two_point_distance(A.x, A.y,
                                                                             B.x, B.y)
                angle = geo.get_degree(A.y, A.x, B.y, B.x)
                self.bearing_between_usv[i][j] = A.ship_heading - angle
            d.append(self.pursuit_agents[i].distance_remaining)

        # global state variables
        self.mean_distance = np.mean(d)
        self.var = np.var(d)
        obs = [self.get_state_of_agent(i) for i in range(self.agent_num)]
        self.obs = obs
        return obs

    def reset(self):
        """Initialise / reset the scenario and return the initial observations.

        Spawn locations come either from the hard-scenario replay buffer or
        are freshly sampled (50/50 when the buffer is enabled and non-empty).
        """
        self.scenairo_num += 1
        self.scenairo_step_num = 0
        self.num_success = 0
        self.total_num_success = 0
        self.escape_agent = EscapeAgent()
        if not etc.use_env_buffer or self.locs.isNull() or random.random() < 0.5:
            self.loc = self.locs.get_random_location()
        else:
            self.loc = self.locs.getLocation()
        for cnt in range(self.agent_num):
            x = self.loc.xs[cnt]
            y = self.loc.ys[cnt]
            ship_speed = self.loc.speeds[cnt]
            ship_heading = self.loc.headings[cnt]
            self.pursuit_agents[cnt] = Agent(self.escape_agent.x, self.escape_agent.y, x, y, ship_speed, ship_heading,
                                             self.max_distance)
            self.scenairo_rewards[cnt] = 0.0
        return self.update_observation_state()

    def step(self, actions):
        """Advance the scenario one tick.

        :param actions: per-agent (acceleration factor, turn factor) pairs
        :return: (observations, rewards, done)
        """
        for i in range(self.agent_num):
            # these are really the DESIRED speed and heading for the tick
            new_speed = self.pursuit_agents[i].current_speed + actions[i][0] * etc.usv_max_acceleration
            if new_speed < 0:
                new_speed = 0.0
            if new_speed > etc.usv_max_speed:
                new_speed = etc.usv_max_speed
            new_heading = self.pursuit_agents[i].ship_heading + actions[i][1] * etc.usv_max_roll
            self.pursuit_agents[i].step(new_speed, new_heading)

        self.escape_agent.step(self.escape_speed, self.cal_escape_heading())

        obs = self.update_state()
        done = self.is_done()
        rewards = self.cal_rewards()
        self.succeed()  # updates success flags/counters

        self.step_num += 1
        self.scenairo_step_num += 1

        if done:
            obs = self.obs
            rewards = self.rewards
            self.scenairo_step_num = 0
            message = "[ "
            for i in range(self.agent_num):
                message += str(round(self.scenairo_rewards[i]/etc.scenairo_max_num, 2)) + " "
            average_reward = np.sum(self.scenairo_rewards)
            self.max_scenairo_reward = max(self.max_scenairo_reward, average_reward)
            if etc.use_env_buffer:
                # remember this scenario so hard starts can be replayed
                self.locs.push(average_reward, self.loc)
            message += "]"
            print("第", self.scenairo_num, "轮场景结束，成功围捕", self.total_num_success, "次，奖励为"+message)
        if self.step_num % etc.print_internal == 0:
            if etc.print_info_in_terminate:
                print("\n-------------------------------")
                print("step_num: "+str(self.step_num))
                print("rewards: ", rewards)
                for i in range(self.agent_num):
                    print(self.pursuit_agents[i].distance_remaining)
                print("degree_diff: ", self.pursuit_degree_diff)
                print("neighbor_pursuit_degree: ", self.neighbor_pursuit_degree)
                print("-------------------------------\n")
            if not self.eval_mode:
                # curriculum: slowly shrink the start circle and speed up the escaper
                if self.max_distance >= etc.pursuit_distance and self.step_num >= 200000:
                    self.max_distance -= self.decrease_distance * 0.001
                self.escape_speed = etc.max_escape_speed * self.step_num / 1000000

        return obs, rewards, done

    def step_to_goal(self, i):
        """Hand-crafted policy for agent *i*: full speed, turn toward the goal.

        Returns a (speed action, heading action) pair; the turn magnitude is
        the bearing error quantised into at most 4 roll increments.
        """
        bearing = self.pursuit_agents[i].relative_bearing
        if bearing == 0:
            # already pointing at the goal — avoid 0/0 in the sign computation
            return 4, 0
        sign = bearing / math.fabs(bearing)
        size = math.fabs(bearing) / etc.usv_max_roll
        if size > 4.0:
            size = 4.0
        size = int(size)
        return 4, -size * sign

    def update_state(self):
        """Refresh every agent's local state, then all observation state.

        :return: the per-agent observation list
        """
        # each pursuer updates its own view of the escape agent
        for cnt in range(self.agent_num):
            self.pursuit_agents[cnt].update_local_state(
                self.escape_agent.x, self.escape_agent.y, self.max_distance)

        # then refresh the pairwise/global observation state
        return self.update_observation_state()

    def get_state_of_agent(self, i: int):
        """Build agent *i*'s observation vector (the policy-network input).

        :param i: index of the agent
        :return: 1-D numpy array (length depends on the configuration)
        """
        agent = self.pursuit_agents[i]
        # local state
        if self.obs_dim == 16:
            o_i = [radians(agent.relative_bearing), radians(agent.degree_changing),
                   agent.distance_remaining / 1000,
                   agent.distance_changing / 10,
                   agent.current_speed / 10, agent.speed_changing / 10]
        else:
            o_i = [radians(agent.relative_bearing), radians(agent.degree_changing),
                   agent.distance_changing / 10,
                   agent.current_speed / 10, agent.speed_changing / 10]
        state = np.array(o_i)
        # global state
        o_all = [self.pursuit_degree_diff[i], self.mean_distance]

        if etc.use_state_network:
            o_list = []
            for j in range(self.agent_num):
                if j == i:
                    continue
                o_ij = [self.distance_between_usv[i][j], self.bearing_between_usv[i][j], self.bearing_between_usv[j][i],
                        self.pursuit_distance_diff[i][j], self.pursuit_degree[i][j]]
                o_list.append(o_ij)
            # FIXME(review): only the LAST o_ij is concatenated here; o_list is
            # built but never used.  The intent was almost certainly
            # np.concatenate((state, np.array(o_list).flatten())), but fixing it
            # changes the observation length (etc.complex_obs_dim), so the
            # original behavior is preserved until that config is confirmed.
            state = np.concatenate((state, o_ij))
            state = np.concatenate((state, np.array(o_all)))
            return state
        else:
            # observe only the two bearing-neighbours of agent i
            o_list = []
            for j in range(self.agent_num):
                curr = self.sorted_degree_list[j][0]
                if i == curr:
                    nxt = self.sorted_degree_list[(j + 1) % self.agent_num][0]
                    last = self.sorted_degree_list[(j - 1 + self.agent_num) % self.agent_num][0]

                    def append_observation(other):
                        o_ij = [self.distance_between_usv[i][other] / 1000, radians(self.bearing_between_usv[i][other]),
                                radians(self.bearing_between_usv[other][i]),
                                radians(self.pursuit_degree[i][other])]
                        o_list.append(o_ij)

                    append_observation(nxt)
                    append_observation(last)
            o_all = [self.mean_distance / 1000, radians(self.pursuit_degree_diff[i])]
            total_state = np.concatenate((state, np.array(o_list).flatten(), np.array(o_all)))
            return total_state

    def cal_rewards(self):
        """Compute each pursuer's reward after a step.

        Components: angular-spacing penalty, distance-to-goal penalty (or
        capture bonus), global-success bonus, spacing-accuracy bonus, and a
        collision penalty based on the nearest other USV.
        """
        cnt = 0
        for agent in self.pursuit_agents:
            reward = 0.0
            # deviation of both adjacent bearing gaps from the ideal spacing
            angle1 = math.fabs(math.fabs(self.neighbor_pursuit_degree[cnt]) - self.expected_degree)
            angle2 = math.fabs(
                math.fabs(self.neighbor_pursuit_degree[(cnt + 1) % self.agent_num]) - self.expected_degree)
            angle_reward = -0.05 * max(angle1, angle2) - 0.28 * math.fabs(self.pursuit_degree_diff[cnt])
            if agent.distance_remaining > 0:
                reward += - 0.05 * agent.distance_remaining
            else:
                # this agent reached the capture circle
                reward += 30
            if self.success:
                reward += 40
            reward += angle_reward / 10
            if math.fabs(self.pursuit_degree_diff[cnt]) <= etc.degree_error:
                reward += 10

            # collision penalty: distance to the closest other USV
            min_d = 10000.0
            for j in range(self.agent_num):
                if cnt == j:
                    continue
                min_d = min(self.distance_between_usv[cnt][j], min_d)
            # also consider this agent's distance to the escape agent.
            # (was pursuit_agents[i] — a leaked loop index that always read
            # the LAST agent's distance instead of the current one)
            min_d = min(min_d, self.pursuit_agents[cnt].distance_remaining + self.max_distance)
            if min_d >= 20:
                reward -= math.exp(16 / min_d)
            elif min_d >= 10:
                reward -= math.exp(35 / min_d)
            else:
                reward -= math.exp(7)

            self.rewards[cnt] = reward / 10
            self.scenairo_rewards[cnt] += self.rewards[cnt]
            cnt += 1
        return self.rewards

    def cal_escape_heading(self):
        """Potential-field escape heading: sum of repulsive vectors from each
        pursuer weighted by 1/d^2, turned into a way-point behind the escaper."""
        p_m = np.array([self.escape_agent.y, self.escape_agent.x])
        p = np.zeros(2, dtype=float)
        for agent in self.pursuit_agents:
            p_i = np.array([agent.y, agent.x]) - p_m
            d_i = agent.distance_remaining + self.max_distance
            if d_i < 0.5:
                # pursuer is (numerically) on top of the escaper — skip it
                continue
            p_mi = p_i / (d_i * d_i)
            p = p + p_mi
        d = math.sqrt(p[0] * p[0] + p[1] * p[1])
        # NOTE(review): d == 0 (perfectly symmetric pursuers) would divide by
        # zero here, exactly as in the original — confirm whether that layout
        # can occur in practice.
        p = p / d * 0.05
        # note: this p vector is really the lat/lon of the desired way-point
        p = p_m - p
        contact_heading = geo.get_degree(p_m[0], p_m[1], p[0], p[1])
        return contact_heading
