'''
Multi-agent encirclement environment.

Agents move on a 2D integer grid (one cell per step in one of four
directions) and are rewarded for closing in on a target; an episode is
terminal when the convex hull of a group of nearby agents encloses it.
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull, Delaunay
from matplotlib.patches import Polygon
from matplotlib.patches import Circle

Find_size = 50 # search radius: agents within this distance of each other can team up to form an encirclement
# Environment definition
class Environment:
    """Multi-agent 2D grid world in which agents try to encircle a target.

    Agents occupy integer (x, y) positions and move one cell per step
    (up/down/left/right). An encirclement is detected when the convex hull
    of a group of mutually-nearby agents (within ``Find_size`` of each
    other) contains the target point.
    """

    def __init__(self, num_agents, num_targets):
        """Create the environment and its matplotlib visualization.

        Args:
            num_agents: number of moving agents.
            num_targets: number of targets (current target layout is the
                single hard-coded point ``[10, -10]`` — TODO: generalize).
        """
        # Per-agent observation is an (x, y) position.
        self.state_dim = 2
        self.action_dim = 4  # four discrete moves: up, down, left, right
        self.num_agents = num_agents
        self.num_targets = num_targets

        # Fixed target; agents start at random integer coords in [-50, 50).
        self.targets = [[10, -10]]
        self.agent_pos = np.random.randint(-50, 50, size=(self.num_agents, 2))
        print("self.target :", self.targets)
        print("self.agents:", self.agent_pos)

        # Keep *independent* copies of the initial layout so restart() can
        # restore it even after in-place mutation of the live state.
        self.agent_pos_initial = self.agent_pos.copy()
        self.targets_pos_initial = [list(t) for t in self.targets]

        # Per-agent candidate hull point sets, filled by is_terminal().
        self.filtered_agent_pos = [None] * self.num_agents

        # Three stacked axes: positions, loss curves, reward curves.
        self.fig, (self.ax, self.ax_loss, self.ax_reward) = plt.subplots(3, 1)

        print("环境初始化完成")

    def restart(self):
        """Reset agents and targets to their initial positions."""
        # Element-wise copy: the original assigned an alias here, so any
        # in-place edit of self.targets would corrupt the saved layout.
        self.targets = [list(t) for t in self.targets_pos_initial]
        self.filtered_agent_pos = [None] * self.num_agents
        self.agent_pos = self.agent_pos_initial.copy()

    def get_state(self):
        """Return the (num_agents, 2) array of agent positions."""
        return self.agent_pos

    def get_target(self):
        """Return the list of target positions."""
        return self.targets

    def get_distance(self):
        """Return per-agent displacement vectors to every target.

        Returns:
            ndarray of shape (num_agents, 2 * num_targets): for each agent,
            the concatenated (dx, dy) offsets ``agent - target`` to each
            target, unnormalized.
        """
        self_states = np.copy(self.agent_pos)
        target_states = np.copy(self.targets)
        # Broadcast to pairwise differences of shape (num_agents, num_targets, 2).
        diff = np.expand_dims(self_states, axis=1) - np.expand_dims(target_states, axis=0)
        # One row per agent, all target offsets concatenated.
        return diff.reshape(-1, 2 * self.num_targets)

    def step(self, actions):
        """Apply one movement action per agent and return their rewards.

        Args:
            actions: sequence of action indices, one per agent;
                0 = up (+y), 1 = down (-y), 2 = left (-x), 3 = right (+x).
                Any other value leaves that agent in place.

        Returns:
            list of float rewards, one per agent: ``1 / (d + 0.1)`` where
            ``d`` is the distance to the agent's nearest target, so the
            reward grows toward 10.0 as the agent reaches the target.
        """
        # NOTE(fix): the original body referenced undefined names
        # `action`/`agent_idx` and raised NameError on every call.
        moves = {0: (0, 1), 1: (0, -1), 2: (-1, 0), 3: (1, 0)}
        rewards = []
        for agent_idx, action in enumerate(actions):
            dx, dy = moves.get(action, (0, 0))
            self.agent_pos[agent_idx, 0] += dx
            self.agent_pos[agent_idx, 1] += dy
            # Distance from this agent to every target; reward on nearest.
            distances = np.linalg.norm(self.agent_pos[agent_idx] - self.targets, axis=1)
            closest_distance = distances[np.argmin(distances)]
            # +0.1 keeps the denominator strictly positive (no try/except needed).
            rewards.append(1 / (closest_distance + 0.1))
        return rewards

    # Find all groups of agents that could form an encirclement and test
    # whether any of them actually encloses the target.
    def is_terminal(self):
        """Check whether nearby-agent groups enclose the target.

        Returns:
            (target_enclosed, index_list): ``target_enclosed`` is a list of
            booleans (only index 0 is meaningful — single target);
            ``index_list[j]`` counts how many enclosing groups agent ``j``
            participates in.
        """
        self.filtered_agent_pos = [None] * self.num_agents
        index_list = [0] * self.num_agents

        # For each agent, gather neighbours within Find_size; at least two
        # neighbours (three points total) are needed for a 2D hull.
        for i, pos in enumerate(self.agent_pos):
            close_points = [other for j, other in enumerate(self.agent_pos)
                            if i != j and np.linalg.norm(pos - other) < Find_size]
            if len(close_points) >= 2 and self.filtered_agent_pos[i] is None:
                self.filtered_agent_pos[i] = np.vstack([close_points, np.array(pos)])

        target_enclosed = [False] * self.num_agents
        for i, pos in enumerate(self.agent_pos):
            if self.filtered_agent_pos[i] is None:
                continue
            enclosed = bool(self.isInHull(self.filtered_agent_pos[i], [self.targets[0]])[0])
            # OR-accumulate: one enclosing group is enough. (The original
            # overwrote the flag each iteration, so a later non-enclosing
            # group erased an earlier positive result.)
            target_enclosed[0] = target_enclosed[0] or enclosed
            if enclosed:
                index_list[i] += 1
                for j, other_pos in enumerate(self.agent_pos):
                    if i != j and np.linalg.norm(pos - other_pos) < Find_size:
                        index_list[j] += 1
        return target_enclosed, index_list

    # Point-in-convex-hull test
    def isInHull(self, agent_pos, target):
        """Test whether each point in ``target`` lies inside the convex
        hull of ``agent_pos``.

        Returns:
            boolean array with one entry per target point, or ``[False]``
            when no valid hull exists (too few / collinear points).
        """
        result = [False]
        try:
            hull = ConvexHull(agent_pos)
            # Hull facets satisfy A @ x + b <= 0 for interior points.
            A = hull.equations[:, 0:-1]
            b = np.transpose(np.array([hull.equations[:, -1]]))
            result = np.all((A @ np.transpose(target)) <= np.tile(-b, (1, len(target))), axis=0)
        except Exception:
            # Degenerate input (QhullError etc.): treat as "not enclosed".
            pass
        return result

    def update_visualization(self, loss_list, rewards_list):
        """Redraw agent/target positions, hulls, loss and reward curves."""
        self.ax.clear()

        # One legend entry per agent and per target.
        for i in range(self.num_agents):
            self.ax.plot(self.agent_pos[i, 0], self.agent_pos[i, 1], 'o', label=f'Agent{i}')
        for i in range(self.num_targets):
            # [i][0] indexing works for both lists and ndarrays
            # (self.targets is a plain list; [i, 0] raised TypeError).
            self.ax.plot(self.targets[i][0], self.targets[i][1], 'x', label=f'Target{i}')

        # Show each agent's detection radius.
        for pos in self.agent_pos:
            circle = Circle((pos[0], pos[1]), Find_size, fill=False, color='blue', alpha=0.3)
            self.ax.add_patch(circle)

        # Outline any candidate enclosing hulls found by is_terminal().
        for i in range(self.num_agents):
            if self.filtered_agent_pos[i] is not None:
                try:
                    hull = ConvexHull(self.filtered_agent_pos[i])
                    hull_polygon = Polygon(self.filtered_agent_pos[i][hull.vertices],
                                           edgecolor='r', linestyle='--', linewidth=2,
                                           closed=True, fill="red")
                    self.ax.add_patch(hull_polygon)
                except Exception:
                    # Degenerate point set: nothing to draw.
                    pass

        self.ax.set_xlim(-100, 100)
        self.ax.set_ylim(-100, 100)
        self.ax.legend()
        self.ax.set_title('Target and Agent Positions')

        # Loss curves (skipped while no losses have been recorded yet).
        if len(loss_list):
            self.ax_loss.clear()
            for i in range(self.num_agents):
                self.ax_loss.plot(loss_list[i])
            self.ax_loss.legend(['Agent {}'.format(i) for i in range(self.num_agents)])
            self.ax_loss.set_title('Loss Values')

        # Reward curves.
        self.ax_reward.clear()
        for i in range(self.num_agents):
            self.ax_reward.plot(rewards_list[i])
        self.ax_reward.legend(['Agent {}'.format(i) for i in range(self.num_agents)])
        self.ax_reward.set_title('Reward Values')

        plt.pause(0.01)  # brief pause so the figure actually refreshes