# -*- coding: utf-8 -*-
"""
Multi-agent cooperative path-planning reinforcement-learning environment.
"""

import gym
import math
import random
import numpy as np
import matplotlib
matplotlib.use('Agg')  # 使用 Agg 后端，适合无图形界面环境
import matplotlib.pyplot as plt
from gym import spaces
from copy import deepcopy
from pathlib import Path
from collections import deque
from scipy.integrate import odeint,solve_ivp
from shapely import geometry as geo
from shapely.plotting import plot_polygon

__all__ = ["MultiAgentPathPlanning"]


# ----------------------------- ↓↓↓↓↓ Map settings ↓↓↓↓↓ ------------------------------#
class MAP:
    """Static map definition: bounds, default start/goal, and obstacle geometry."""
    size = [[-300.0, -300.0], [300.0, 300.0]]  # [x_min, y_min], [x_max, y_max]
    start_pos = [-200, -200]              # default start position (lower-left)
    end_pos = [200, 200]                  # default goal position (upper-right)
    start_yaw = math.pi/4                 # initial heading (towards the north-east)
    obstacles = [                         # obstacles: geo.Polygon, or buffered geo.Point/geo.LineString
        geo.Point(0, 0).buffer(30),       # reduced obstacle radius
    ]

    @classmethod
    def show(cls):
        """Open a blocking window showing the map with start/goal markers."""
        plt.rcParams['font.sans-serif'] = ['DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False
        plt.close('all')
        fig, ax = plt.subplots()
        ax.clear()
        cls.plot(ax)
        ax.scatter(cls.start_pos[0], cls.start_pos[1], s=30, c='k', marker='x', label='起点')
        ax.scatter(cls.end_pos[0], cls.end_pos[1], s=30, c='k', marker='o', label='终点')
        ax.legend(loc='best').set_draggable(True)
        plt.show(block=True)

    @classmethod
    def plot(cls, ax, title='Map'):
        """Draw map bounds and all obstacles onto the given axes."""
        ax.clear()
        ax.set_aspect('equal')
        ax.set_title(title)
        ax.set_xlabel("x")
        ax.set_ylabel("z")
        ax.grid(alpha=0.3, ls=':')
        (x_min, y_min), (x_max, y_max) = cls.size
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
        for obstacle in cls.obstacles:
            plot_polygon(obstacle, ax=ax, facecolor='w', edgecolor='k', add_points=False)


# ----------------------------- ↓↓↓↓↓ Dynamic obstacle-avoidance environment ↓↓↓↓↓ ------------------------------#
class Logger:
    """Plain attribute container: the environment assigns episode records
    (paths, controls, speeds, headings, lengths) onto instances of this class."""

# Speed settings
V_LOW = 8  # minimum speed (lowered)
V_HIGH = 15  # maximum speed
V_CRUISE = 12  # cruise speed
# Point-mass dynamic state bounds
STATE_LOW = [MAP.size[0][0], MAP.size[0][1], V_LOW, -math.pi]  # x, z, V, ψ
STATE_HIGH = [MAP.size[1][0], MAP.size[1][1], V_HIGH, math.pi]  # x, z, V, ψ

# Control bounds
CTRL_LOW = [-0.3, -0.2]  # tangential acceleration + yaw rate (rad/s)
CTRL_HIGH = [0.3, 0.2]   # tangential acceleration + yaw rate (rad/s)

# Distance thresholds
D_SAFE = 15  # collision radius (increased)
D_BUFF = 40  # buffer distance (increased)
D_GOAL = 30  # goal capture radius (increased)

# Length of the sequence-observation buffers
TIME_STEP = 4


class MultiAgentPathPlanning(gym.Env):
    """Multi-agent cooperative path planning posed as a dynamics/control problem
    (East-North-Up frame).

    Point-mass dynamics per agent:
    >>> dx/dt = V * cos(ψ)
    >>> dz/dt = V * sin(ψ)
    >>> dV/dt = a
    >>> dψ/dt = psi_dot
    >>> u = [a, psi_dot]
    """

    def __init__(self, num_agents=2, max_episode_steps=500, dt=0.1, normalize_observation=True, old_gym_style=True):
        """
        Args:
            num_agents (int): Number of agents. Default 2.
            max_episode_steps (int): Maximum number of simulation steps. Default 500.
            dt (float): Decision period in seconds. Default 0.1.
            normalize_observation (bool): Whether observations are normalized. Default True.
                NOTE(review): _get_obs currently always normalizes; this flag is stored
                but never consulted — confirm intended behavior.
            old_gym_style (bool): Whether to use the legacy gym step/reset API. Default True.
        """
        # simulation
        self.dt = dt
        self.max_episode_steps = max_episode_steps
        self.num_agents = num_agents
        self.log = Logger()
        # obstacles
        self.obstacles = MAP.obstacles
        # state space + control space
        self.state_space = spaces.Box(np.array(STATE_LOW), np.array(STATE_HIGH))
        self.control_space = spaces.Box(np.array(CTRL_LOW), np.array(CTRL_HIGH))
        # observation space + action space
        # per-agent observation: own state (4) + relative state of the other
        # agents (3*(n-1)) + goal position (2) + obstacle proximity flag (1)
        obs_dim = 4 + 3 * (num_agents - 1) + 2 + 1
        self.observation_space = spaces.Box(
            low=np.array([-1] * obs_dim),
            high=np.array([1] * obs_dim),
            dtype=np.float32
        )
        self.action_space = spaces.Box(-1, 1, (len(CTRL_LOW), ))
        # sequence-observation buffers (currently unused by _get_obs)
        self.deque_points = deque(maxlen=TIME_STEP)
        self.deque_vector = deque(maxlen=TIME_STEP)
        # environment control flags
        self.__render_not_called = True
        self.__need_reset = True
        self.__norm_observation = normalize_observation
        self.__old_gym = old_gym_style
        # matplotlib settings
        plt.rcParams['font.sans-serif'] = ['DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False
        plt.close("all")

    def reset(self, mode=0):
        """Reset the environment.

        mode=0: random start/goal positions; speed and heading random.
        mode=1: start/goal taken from the MAP settings; speed and heading random.
        """
        self.__need_reset = False
        self.time_step = 0

        # per-agent containers
        self.states = []           # state [x, z, V, ψ] of every agent
        self.ctrls = []            # last control of every agent
        self.start_positions = []  # start position of every agent
        self.end_positions = []    # goal position of every agent

        # the goal is shared by all agents
        if mode == 0:
            # random goal in the upper-right quadrant
            common_end_pos = np.array([random.randint(100, 200), random.randint(100, 200)], dtype=np.float32)
        else:
            common_end_pos = np.array(MAP.end_pos, dtype=np.float32)

        # reject a goal that falls inside an obstacle
        for o in self.obstacles:
            if o.contains(geo.Point(*common_end_pos)):
                if mode != 0:
                    raise ValueError("地图的目标位置不能设置在障碍里面!!!")
                return self.reset(mode)  # resample the goal

        # place every agent
        for i in range(self.num_agents):
            max_attempts = 100  # placement attempts per agent
            attempt = 0
            while True:
                state = self.state_space.sample()
                if mode == 0:
                    # random start in the lower-left quadrant
                    center = np.array([random.randint(-200, -100), random.randint(-200, -100)], dtype=np.float32)
                    theta = np.random.uniform(0, math.pi/2)
                    state[:2] = center
                    state[3] = theta
                    start_pos = deepcopy(state[:2])
                else:
                    # start from the MAP settings
                    start_pos = np.array(MAP.start_pos, dtype=np.float32)
                    state = np.array([*start_pos[:2], *state[2:]], dtype=np.float32)

                # reject a start inside an obstacle
                collision = False
                for o in self.obstacles:
                    if o.contains(geo.Point(*start_pos)):
                        if mode != 0:
                            raise ValueError("地图的初始位置不能设置在障碍里面!!!")
                        collision = True
                        break

                # reject a start too close to a previously placed agent
                too_close = any(
                    np.linalg.norm(start_pos - self.start_positions[j]) < D_SAFE
                    for j in range(i)
                )

                attempt += 1
                if attempt >= max_attempts:
                    if mode != 0:
                        raise ValueError(f"无法为智能体{i}找到合适的初始位置，请检查地图设置!")
                    # BUGFIX: the original reassigned the for-loop variable `i`,
                    # which does not restart the loop and then indexed the emptied
                    # start_positions list; restart the whole placement instead.
                    return self.reset(mode)

                if not collision and not too_close:
                    break

            self.states.append(state)
            self.ctrls.append(np.zeros(self.action_space.shape, dtype=np.float32))
            self.start_positions.append(start_pos)
            self.end_positions.append(common_end_pos)  # shared goal

        # voyage and distance bookkeeping
        self.L = [0.0] * self.num_agents  # traveled distance per agent
        self.D_init = [np.linalg.norm(end - start) for start, end in zip(self.start_positions, self.end_positions)]
        self.D_last = self.D_init.copy()

        # reset the episode log
        self.log.start_pos = self.start_positions
        self.log.end_pos = self.end_positions
        self.log.path = [[pos] for pos in self.start_positions]
        self.log.ctrl = [[ctrl] for ctrl in self.ctrls]
        self.log.speed = [[state[2]] for state in self.states]
        self.log.yaw = [[state[3]] for state in self.states]
        self.log.length = [[[l, d]] for l, d in zip(self.L, self.D_last)]

        obs = self._get_obs()

        if self.__old_gym:
            return obs
        return obs, {}

    def _get_ctrl(self, act, tau=0.9, index=0):
        """Map a single agent's action in [-1, 1] to a control in [u_min, u_max].

        Args:
            act: the agent's action, each component in [-1, 1].
            tau: smoothing coefficient; None disables smoothing.
            index: index of the agent whose previous control is used for smoothing.
        """
        lb = self.control_space.low
        ub = self.control_space.high
        u = lb + (act + 1.0) * 0.5 * (ub - lb)  # [-1,1] -> [lb,ub]
        u = np.clip(u, lb, ub)  # NOTE: float error can yield e.g. 1.0000001
        # smooth the control signal
        if tau is not None:
            # BUGFIX: the original indexed self.ctrls with the action array
            # (self.ctrls[act]), which raises whenever smoothing is enabled.
            return (1.0 - tau) * self.ctrls[index] + tau * u
        return u

    def _get_obs(self):
        """Build the normalized, flat observation for all agents.

        Per agent:
        - own state (4): position (x, y), speed V, heading ψ
        - other agents (3 each): relative position (dx, dy), relative heading dψ
        - goal (2): goal position (end_x, end_y)
        - obstacle flag (1): 1 if within D_SAFE of any obstacle, else 0
        """
        obs = []
        for i in range(self.num_agents):
            x, y, V, ψ = self.states[i]

            # distance to the nearest obstacle
            if self.obstacles:
                # BUGFIX: consider every obstacle, not only obstacles[0]
                dis_obs = min(o.distance(geo.Point(x, y)) for o in self.obstacles)
                obstacle_flag = 1 if dis_obs < D_SAFE else 0
            else:
                obstacle_flag = 0

            # relative state of the other agents
            other_agents_obs = []
            for j in range(self.num_agents):
                if i != j:
                    other_x, other_y, _, other_ψ = self.states[j]
                    dx = (other_x - x) / MAP.size[1][0]  # normalized relative x
                    dy = (other_y - y) / MAP.size[1][1]  # normalized relative y
                    dψ = self._limit_angle(other_ψ - ψ) / math.pi  # normalized relative heading
                    other_agents_obs.extend([dx, dy, dψ])

            obs.append(np.array([
                x / MAP.size[1][0],                         # normalized x
                y / MAP.size[1][1],                         # normalized y
                (V - V_LOW) / (V_HIGH - V_LOW),             # normalized speed
                ψ / math.pi,                                # normalized heading
                *other_agents_obs,                          # other agents' relative state
                self.end_positions[i][0] / MAP.size[1][0],  # normalized goal x
                self.end_positions[i][1] / MAP.size[1][1],  # normalized goal y
                obstacle_flag                               # obstacle proximity flag
            ], dtype=np.float32))

        return np.concatenate(obs)

    def _get_rew(self):
        """Compute per-agent reward, done flag, and info dict.

        Reward terms:
        1. goal-seeking: encourage moving towards the goal
        2. obstacle: penalize collisions / proximity to obstacles
        3. inter-agent: penalize agent-agent collisions / proximity
        4. efficiency: encourage direct, smooth paths
        """
        rew = []
        done = []
        info = []
        for i in range(self.num_agents):
            x, y, V, ψ = self.states[i]

            # distance to the goal and its improvement since the last step
            dis_goal = np.linalg.norm(self.end_positions[i] - self.states[i][:2])
            distance_improvement = self.D_last[i] - dis_goal
            self.D_last[i] = dis_goal

            # 1. goal-seeking reward
            if dis_goal < D_GOAL:
                # large bonus on reaching the goal
                goal_r = 200.0
                done.append(True)
                info.append({'state': 'success'})
            else:
                # a. distance reward: larger when closer to the goal
                normalized_dist = dis_goal / self.D_init[i]
                distance_r = 2.0 * (1.0 - normalized_dist)

                # b. heading reward: angle between current heading and goal bearing
                target_angle = math.atan2(self.end_positions[i][1] - y, self.end_positions[i][0] - x)
                angle_diff = abs(self._limit_angle(target_angle - ψ))
                heading_r = 2.0 * (1.0 - angle_diff/math.pi)  # smaller angle -> larger reward

                # c. progress reward: displacement towards the goal
                if distance_improvement > 0:
                    progress_r = 5.0 * distance_improvement / self.D_init[i]
                else:
                    progress_r = -8.0 * distance_improvement / self.D_init[i]  # heavier penalty for moving away

                goal_r = distance_r + heading_r + progress_r
                done.append(False)
                info.append({'state': 'none'})

            # 2. obstacle reward
            obstacle_r = 0
            if self.obstacles:
                # BUGFIX: check the nearest of all obstacles, not only obstacles[0]
                dis_obs = min(o.distance(geo.Point(x, y)) for o in self.obstacles)
                if dis_obs < D_SAFE:
                    # collision penalty
                    obstacle_r = -100.0
                    done[-1] = True
                    info[-1] = {'state': 'fail'}
                elif dis_obs < D_BUFF:
                    # exponentially decaying proximity penalty
                    normalized_dist = (dis_obs - D_SAFE) / (D_BUFF - D_SAFE)
                    obstacle_r = -20.0 * np.exp(-2 * normalized_dist)

            # 3. inter-agent collision reward
            # NOTE(review): only the last conflicting neighbor contributes (the
            # value is overwritten, not accumulated) — confirm this is intended.
            agent_collision_r = 0
            for j in range(self.num_agents):
                if i != j:
                    other_pos = self.states[j][:2]
                    dis_agent = np.linalg.norm(np.array([x, y]) - other_pos)
                    if dis_agent < D_SAFE:
                        agent_collision_r = -100.0
                        done[-1] = True
                        info[-1] = {'state': 'fail'}
                    elif dis_agent < D_BUFF:
                        normalized_dist = (dis_agent - D_SAFE) / (D_BUFF - D_SAFE)
                        agent_collision_r = -20.0 * np.exp(-2 * normalized_dist)

            # 4. efficiency reward — penalize abrupt heading changes
            if len(self.log.yaw[i]) > 1:
                yaw_change = abs(self._limit_angle(ψ - self.log.yaw[i][-1]))
                efficiency_r = -0.5 * yaw_change
            else:
                efficiency_r = 0

            # weighted total reward
            reward = (
                3.0 * goal_r +            # goal-seeking weight
                2.0 * obstacle_r +        # obstacle-avoidance weight
                2.0 * agent_collision_r + # inter-agent avoidance weight
                1.0 * efficiency_r        # efficiency weight
            )

            # clip the reward
            reward = np.clip(reward, -100.0, 150.0)
            rew.append(reward)

        return rew, done, info

    def step(self, act: np.ndarray, tau: float = None):
        """Advance the simulation by one decision period.

        Args:
            act (np.ndarray): actions, shape (num_agents, action_dim), each in [-1, 1].
            tau (float): smoothing coefficient for the control u in [u_min, u_max]:
                u = tau*u + (1-tau)*u_last. Default None (no smoothing).
        """
        assert not self.__need_reset, "调用step前必须先reset"
        # numerical integration
        self.time_step += 1
        new_states = []
        for i in range(self.num_agents):
            u = self._get_ctrl(act[i], tau, index=i)
            new_state = self._ode45(self.states[i], u, self.dt)
            # BUGFIX: accumulate the voyage BEFORE self.states is replaced; the
            # original assigned self.states = new_states first, so the increment
            # was always norm(new - new) == 0 and the voyage never grew.
            self.L[i] += float(np.linalg.norm(new_state[:2] - self.states[i][:2]))
            new_states.append(new_state)
            self.ctrls[i] = deepcopy(u)

        # commit the new states
        self.states = new_states

        # transition tuple
        obs = self._get_obs()
        rew, done, info = self._get_rew()

        # episode termination bookkeeping
        truncated = self.time_step >= self.max_episode_steps
        episode_info = {
            "done": done,
            "truncated": truncated,
            "reward": rew,
            "time_step": self.time_step,
            "voyage": self.L,
            "distance": self.D_last,
            "terminal": truncated or any(done)
        }

        # logging
        for i in range(self.num_agents):
            self.log.path[i].append(self.states[i][:2])
            self.log.ctrl[i].append(self.ctrls[i])
            self.log.speed[i].append(self.states[i][2])
            self.log.yaw[i].append(self.states[i][3])
            self.log.length[i].append([self.L[i], self.D_last[i]])

        if self.__old_gym:
            return obs, rew, done, info
        return obs, rew, done, episode_info

    def render(self, mode="human", figsize=[8,8]):
        """Visualize the environment during testing; call alternately with step.
        (Do not combine with plot — the windows tend to freeze.)
        """
        assert not self.__need_reset, "调用render前必须先reset"
        # create the figure on the first call
        if self.__render_not_called:
            self.__render_not_called = False
            with plt.ion():
                fig = plt.figure("render", figsize=figsize)
            ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
            MAP.plot(ax, "Multi-Agent Path Plan Environment")
            self.__plt_car_paths = [ax.plot([], [], 'k-.')[0] for _ in range(self.num_agents)]
            self.__plt_car_points = [ax.scatter([], [], s=15, c='b', marker='o', label=f'Agent {i}') for i in range(self.num_agents)]
            self.__plt_targ_ranges = [ax.plot([], [], 'g:', linewidth=1.0)[0] for _ in range(self.num_agents)]
            self.__plt_targ_points = [ax.scatter([], [], s=15, c='g', marker='o', label=f'Target {i}') for i in range(self.num_agents)]
            ax.legend(loc='best').set_draggable(True)

        # update the artists in place
        for i in range(self.num_agents):
            self.__plt_car_paths[i].set_data(np.array(self.log.path[i]).T)
            self.__plt_car_points[i].set_offsets(self.log.path[i][-1])
            θ = np.linspace(0, 2*np.pi, 18)
            self.__plt_targ_ranges[i].set_data(
                self.log.end_pos[i][0]+D_GOAL*np.cos(θ),
                self.log.end_pos[i][1]+D_GOAL*np.sin(θ)
            )
            self.__plt_targ_points[i].set_offsets(self.log.end_pos[i])

        # flush the GUI event loop
        plt.pause(0.001)

    def close(self):
        """Close the environment and the render window."""
        self.__render_not_called = True
        self.__need_reset = True
        plt.close("render")

    def plot(self, file, figsize=[10,10], dpi=100):
        """Save a summary figure of the episode during training.
        (Do not combine with render — the windows tend to freeze.)
        """
        file = Path(file).with_suffix(".png")
        file.parents[0].mkdir(parents=True, exist_ok=True)
        fig = plt.figure("Output", figsize=figsize)
        gs = fig.add_gridspec(2, 2)
        ax1 = fig.add_subplot(gs[0, 0])
        ax2 = fig.add_subplot(gs[0, 1])
        ax3 = fig.add_subplot(gs[1, 0])
        ax4 = fig.add_subplot(gs[1, 1])

        # trajectories
        MAP.plot(ax1, "Multi-Agent Trajectory")
        for i in range(self.num_agents):
            ax1.scatter(*self.log.path[i][0], s=30, marker='x', label=f'start {i}')
            ax1.plot(*np.array(self.log.path[i]).T, label=f'path {i}')
        ax1.scatter(*self.log.end_pos[0], c='r', s=30, marker='*', label=f'target')
        ax1.legend(loc="best").set_draggable(True)

        # control signals
        ax2.set_title("Control Signal")
        ax2.set_xlabel("time step")
        ax2.set_ylabel("control")
        for i in range(self.num_agents):
            ctrl = np.array(self.log.ctrl[i]).T
            for j, u in enumerate(ctrl):
                ax2.plot(u, label=f'agent {i} u{j}')
        ax2.legend(loc="best").set_draggable(True)

        # speed signals
        ax3.set_title("Speed Signal")
        ax3.set_xlabel("time step")
        ax3.set_ylabel("speed")
        for i in range(self.num_agents):
            ax3.plot(self.log.speed[i], label=f'agent {i} V')
        ax3.legend(loc="best").set_draggable(True)

        # voyage and distance-to-goal
        ax4.set_title("Length Signal")
        ax4.set_xlabel("time step")
        ax4.set_ylabel("length")
        for i in range(self.num_agents):
            length = np.array(self.log.length[i]).T
            ax4.plot(length[0], label=f'agent {i} voyage')
            ax4.plot(length[1], label=f'agent {i} distance')
        ax4.legend(loc="best").set_draggable(True)

        plt.tight_layout()
        fig.savefig(fname=file, dpi=dpi)
        plt.close("Output")

    @staticmethod
    def _limit_angle(x, domain=1):
        """Wrap angle x: domain=1 -> (-π, π], domain=2 -> [0, 2π)."""
        x = x - x//(2*math.pi) * 2*math.pi  # any -> [0, 2π)
        if domain == 1 and x > math.pi:
            return x - 2*math.pi            # [0, 2π) -> (-π, π]
        return x

    @staticmethod
    def _linear_mapping(x, x_min, x_max, left=0.0, right=1.0):
        """Linearly map x: [x_min, x_max] -> [left, right]."""
        y = left + (right - left) / (x_max - x_min) * (x - x_min)
        return y

    @staticmethod
    def _vector_angle(x_vec, y_vec, EPS=1e-8):
        """Angle between vectors x_vec and y_vec, in [0, π]."""
        x = np.linalg.norm(x_vec) * np.linalg.norm(y_vec)
        y = np.dot(x_vec, y_vec)
        if x < EPS:  # zero-vector case
            return 0.0
        if y < EPS:  # ~90° case
            return math.pi/2
        return math.acos(np.clip(y/x, -1, 1))  # note: y/x can exceed ±1 when x is tiny

    @staticmethod
    def _compute_azimuth(pos1, pos2, use_3d_pos=False):
        """ENU frame: azimuth [-π, π] of pos2 relative to pos1, plus elevation
        [-π/2, π/2] in the 3D case."""
        if use_3d_pos:
            x, y, z = np.array(pos2) - pos1
            q = math.atan(z / (math.sqrt(x**2 + y**2) + 1e-8))  # elevation [-π/2, π/2]
            ε = math.atan2(y, x)  # azimuth [-π, π]
            return ε, q
        else:
            x, y = np.array(pos2) - pos1
            return math.atan2(y, x)  # azimuth [-π, π]

    @staticmethod
    def _fixed_wing_2d(s, t, u):
        """Planar motion ODE right-hand side in the ENU frame.
        s = [x, y, V, ψ]
        u = [a, psi_dot]
        """
        _, _, V, ψ = s
        a, psi_dot = u
        dsdt = [
            V * math.cos(ψ),  # x velocity
            V * math.sin(ψ),  # y velocity
            a,                # acceleration
            psi_dot           # yaw rate
        ]
        return dsdt

    @classmethod
    def _ode45(cls, s_old, u, dt):
        """Integrate the motion ODE over one decision period dt."""
        s_new = odeint(cls._fixed_wing_2d, s_old, (0.0, dt), args=(u, ))  # shape=(len(t), len(s))
        x, y, V, ψ = s_new[-1]
        V = np.clip(V, V_LOW, V_HIGH)  # clamp speed to its bounds
        ψ = cls._limit_angle(ψ)        # wrap heading to (-π, π]
        return np.array([x, y, V, ψ], dtype=np.float32)  # fresh array (deep copy)
