import numpy as np
import gym
from gym import spaces
from envs.env_core import EnvCore
from envs.env_uav_cpp_util import UAV_CPP_Utils


class UAVCPPEnvCore(object):
    """
    # 环境中的智能体 - 用于多无人机全路径覆盖（CPP）研究
    # 包含充电约束和栅格地图表示
    """

    def __init__(self):
        """
        Core environment for multi-UAV coverage path planning (CPP) on a
        grid map with battery/charging constraints and optional
        leader/follower communication.
        """
        self.agent_num = 2  # number of UAV agents

        # Observation layout (computed by _calculate_observation_dim):
        # 2 (position) + 1 (battery) + 8 (surrounding cells) + 2 (nearest station)
        # + 1 (coverage ratio) + 1 (at-station flag) + 1 (obstacle flag)
        # + 1 (battery state) + 1 (station distance) + 1 (others' coverage ratio)
        # + 1 (uncovered ratio) + 1 (own coverage ratio) + 8 (surrounding uncovered)
        # + 8 (surrounding own coverage) + 1 (normalized step)
        # + per-other-agent communication info
        self.obs_dim = self._calculate_observation_dim()
        self.action_dim = 5  # discrete actions: up, down, left, right, charge

        # Grid map parameters.
        self.grid_size = 20  # 20x20 grid map
        self.grid = np.zeros((self.grid_size, self.grid_size))  # 0 = uncovered, 1 = covered

        # Obstacle parameters.
        self.obstacle_grid = np.zeros((self.grid_size, self.grid_size))  # 0 = free, 1 = obstacle
        self.obstacle_ratio = 0.1  # fraction of cells occupied by obstacles (default 10%)
        self.min_obstacles = 3  # minimum number of obstacles

        # UAV state. max_battery is defined first so the battery array can
        # be initialized from it instead of a duplicated magic number.
        self.max_battery = 100  # maximum battery level
        self.uav_positions = np.zeros((self.agent_num, 2), dtype=int)  # [x, y] per UAV
        self.uav_battery = np.ones(self.agent_num) * self.max_battery  # start fully charged
        self.battery_consumption = 1  # battery drained per movement step
        self.charging_rate = 10  # battery gained per charging step

        # Battery thresholds.
        self.low_battery_threshold = 30  # low-battery warning level
        self.critical_battery_threshold = 15  # critical level (forces return to station)

        # Charging stations (opposite corners by default; reset() regenerates them).
        self.charging_stations = np.array([[0, 0], [self.grid_size - 1, self.grid_size - 1]])

        self.charging_stations_cnt = len(self.charging_stations)  # number of stations

        # Coverage bookkeeping.
        self.total_cells = self.grid_size * self.grid_size
        self.covered_cells = 0

        # Episode parameters.
        self.max_steps = 500  # maximum steps per episode
        self.current_step = 0

        # Penalty applied when a UAV tries to leave the map.
        self.boundary_penalty = 0.5

        # Path history and duplicate-coverage bookkeeping.
        self.path_history = [[] for _ in range(self.agent_num)]  # visited cells per UAV
        self.recent_coverage = np.zeros((self.grid_size, self.grid_size))  # 1-based id of last coverer
        self.coverage_time = np.zeros((self.grid_size, self.grid_size))  # step of last coverage
        self.duplicate_coverage_penalty = 0.3  # penalty coefficient for re-covering cells

        # Per-agent personal coverage maps.
        self.agent_coverage_maps = [np.zeros((self.grid_size, self.grid_size)) for _ in range(self.agent_num)]

        # Global uncovered map (1 = uncovered and not an obstacle, 0 otherwise).
        self.uncovered_map = np.ones((self.grid_size, self.grid_size))

        # Penalty coefficient for re-walking one's own recent path.
        self.self_path_penalty = 0.3

        # Communication settings.
        self.enable_communication = True  # enable inter-agent communication
        self.communication_range = self.grid_size  # map-wide range => global communication
        self.communication_data = {}  # shared communication records

        # Leader/follower architecture.
        self.leader_id = 0  # the first agent leads by default
        self.leader_follower_mode = True  # enable leader/follower mode

        # Per-agent shared communication record.
        for i in range(self.agent_num):
            self.communication_data[i] = {
                'position': None,  # last broadcast position
                'battery': None,   # last broadcast battery level
                'target': None,    # current target cell
                'status': 'normal', # normal, charging, low_battery, critical_battery
                'next_action': None,  # announced next action
                'role': 'leader' if i == self.leader_id else 'follower',  # role
                'command': None,  # command issued by the leader
                'command_target': None,  # leader-assigned target positions
                'assigned_area': None  # leader-assigned area
            }

    def reset(self):
        """
        Reset the environment for a fresh episode.

        Regenerates obstacles and charging stations, seats the UAVs on (or
        near) the stations, restores full batteries and clears all coverage
        bookkeeping.

        Returns:
            list with one observation vector of shape (obs_dim,) per agent.
        """
        # Clear the coverage grid.
        self.grid = np.zeros((self.grid_size, self.grid_size))

        # Regenerate random obstacles.
        self._generate_random_obstacles()

        # Regenerate charging-station positions.
        self._generate_charging_stations(self.charging_stations_cnt)

        # Clear path history and coverage bookkeeping.
        self.path_history = [[] for _ in range(self.agent_num)]
        self.recent_coverage = np.zeros((self.grid_size, self.grid_size))
        self.coverage_time = np.zeros((self.grid_size, self.grid_size))

        # Reset each UAV's personal coverage map.
        self.agent_coverage_maps = [np.zeros((self.grid_size, self.grid_size)) for _ in range(self.agent_num)]

        # Reset the global uncovered map; obstacle cells count as "covered" (0).
        self.uncovered_map = np.ones((self.grid_size, self.grid_size))
        self.uncovered_map[self.obstacle_grid == 1] = 0

        # Seat the UAVs at the charging stations, keeping them spread out.
        for i in range(self.agent_num):
            if i < len(self.charging_stations):
                # Each UAV starts from a different station.
                self.uav_positions[i] = self.charging_stations[i]
            else:
                # More UAVs than stations: place the extras near a station.
                station_idx = i % len(self.charging_stations)
                base_pos = self.charging_stations[station_idx]

                # Scan a 5x5 neighborhood for a free, unoccupied cell.
                valid_pos_found = False
                for dx in range(-2, 3):
                    for dy in range(-2, 3):
                        new_x, new_y = base_pos[0] + dx, base_pos[1] + dy
                        if 0 <= new_x < self.grid_size and 0 <= new_y < self.grid_size and self.obstacle_grid[new_x, new_y] == 0:
                            # Skip cells already taken by earlier UAVs.
                            pos_occupied = False
                            for j in range(i):
                                if np.array_equal(self.uav_positions[j], [new_x, new_y]):
                                    pos_occupied = True
                                    break

                            if not pos_occupied:
                                self.uav_positions[i] = np.array([new_x, new_y])
                                valid_pos_found = True
                                break
                    if valid_pos_found:
                        break
            # Record the starting cell in the path history.
            self.path_history[i].append(tuple(self.uav_positions[i]))
            # Mark the starting cell as covered.
            x, y = self.uav_positions[i]
            self.grid[x, y] = 1
            self.recent_coverage[x, y] = i + 1  # coverer id is 1-based
            self.coverage_time[x, y] = self.current_step
            # Update the personal coverage map.
            self.agent_coverage_maps[i][x, y] = 1
            # Update the global uncovered map.
            self.uncovered_map[x, y] = 0

        # Restore full batteries (use the configured maximum, not a magic 100).
        self.uav_battery = np.ones(self.agent_num) * self.max_battery

        # Recompute the coverage counter (obstacle cells never enter the grid).
        self.covered_cells = np.sum(self.grid)
        # Recompute the number of coverable cells, excluding obstacles.
        self.total_cells = self.grid_size * self.grid_size - np.sum(self.obstacle_grid)

        # Restart the step counter.
        self.current_step = 0

        # Collect one observation per agent.
        sub_agent_obs = []
        for i in range(self.agent_num):
            sub_obs = self._get_observation(i)
            sub_agent_obs.append(sub_obs)

        return sub_agent_obs

    def step(self, actions):
        """
        Advance the environment by one time step.

        Args:
            actions: list of length agent_num; each entry is either a
                vector of shape (action_dim,) (argmax is taken) or a plain
                action index (0-up, 1-right, 2-down, 3-left, 4-charge).

        Returns:
            [obs_list, reward_list, done_list, info_list], one entry per
            agent; rewards are wrapped in single-element lists.
        """
        self.current_step += 1

        obs_list, reward_list, done_list, info_list = [], [], [], []

        for agent_id in range(self.agent_num):
            raw = actions[agent_id]
            # Accept either a one-hot/logit vector or a scalar index.
            if isinstance(raw, np.ndarray) and raw.size > 1:
                chosen = np.argmax(raw)
            else:
                chosen = int(raw)

            # Apply the action, then observe the resulting state.
            reward, done, info = self._take_action(agent_id, chosen)
            obs_list.append(self._get_observation(agent_id))
            reward_list.append([reward])
            done_list.append(done)
            info_list.append(info)

        # The episode ends for everyone once the map is fully covered or
        # the step budget is exhausted.
        finished = (self.covered_cells >= self.total_cells) or (self.current_step >= self.max_steps)
        if finished:
            done_list = [True] * self.agent_num

        return [obs_list, reward_list, done_list, info_list]
    
    def _generate_random_obstacles(self):
        """
        生成随机障碍物，确保所有区域可达
        """
        # 重置障碍物栅格
        self.obstacle_grid = np.zeros((self.grid_size, self.grid_size))
        
        # 计算障碍物数量
        obstacle_count = max(self.min_obstacles, int(self.obstacle_ratio * self.total_cells))
        
        # 确保充电站位置没有障碍物
        forbidden_positions = [tuple(pos) for pos in self.charging_stations]
        
        # 随机放置障碍物
        placed_obstacles = 0
        while placed_obstacles < obstacle_count:
            x = np.random.randint(0, self.grid_size)
            y = np.random.randint(0, self.grid_size)
            
            # 检查位置是否已有障碍物或是充电站
            if (x, y) not in forbidden_positions and self.obstacle_grid[x, y] == 0:
                # 临时放置障碍物
                self.obstacle_grid[x, y] = 1
                
                # 检查可达性
                if not self._check_map_reachability():
                    # 如果不可达，撤销障碍物放置
                    self.obstacle_grid[x, y] = 0
                    continue
                
                placed_obstacles += 1
                
    def _check_map_reachability(self):
        """
        检查地图是否所有区域可达
        """
        # 使用BFS检查所有非障碍物区域是否连通
        visited = np.zeros_like(self.obstacle_grid)
        queue = []
        
        # 从任意一个非障碍物点开始
        start_pos = None
        for i in range(self.grid_size):
            for j in range(self.grid_size):
                if self.obstacle_grid[i, j] == 0:
                    start_pos = (i, j)
                    break
            if start_pos is not None:
                break
                
        if start_pos is None:
            return False
            
        queue.append(start_pos)
        visited[start_pos[0], start_pos[1]] = 1
        
        directions = [(-1, 0), (1, 0), (0, -1), (0, 1)]
        
        while queue:
            x, y = queue.pop(0)
            
            for dx, dy in directions:
                nx, ny = x + dx, y + dy
                
                if 0 <= nx < self.grid_size and 0 <= ny < self.grid_size:
                    if self.obstacle_grid[nx, ny] == 0 and visited[nx, ny] == 0:
                        visited[nx, ny] = 1
                        queue.append((nx, ny))
        
        # 检查所有非障碍物区域是否都被访问过
        for i in range(self.grid_size):
            for j in range(self.grid_size):
                if self.obstacle_grid[i, j] == 0 and visited[i, j] == 0:
                    return False
        
        return True
 
    def _take_action(self, agent_id, action):
        """
        执行单个智能体的动作
        """
        reward = 0
        done = False
        info = {}
        
        # 获取当前位置
        x, y = self.uav_positions[agent_id]
        new_x, new_y = x, y
        
        # 记录原始动作
        info['original_action'] = int(action)
        
        # 获取最近充电站信息
        min_dist = float('inf')
        closest_station = None
        for station in self.charging_stations:
            dist = np.abs(x - station[0]) + np.abs(y - station[1])  # 曼哈顿距离
            if dist < min_dist:
                min_dist = dist
                closest_station = station
        
        # 处理危险电量情况 - 强制向最近充电站移动
        battery_level = self.uav_battery[agent_id]
        if battery_level <= self.critical_battery_threshold and action != 4:
            # 记录原始动作被覆盖
            info['action_overridden'] = True
            
            # 确定向充电站移动的方向
            if closest_station is not None:
                # 优先减少x方向距离
                if x < closest_station[0]:
                    action = 2  # 下
                elif x > closest_station[0]:
                    action = 0  # 上
                # 然后减少y方向距离
                elif y < closest_station[1]:
                    action = 1  # 右
                elif y > closest_station[1]:
                    action = 3  # 左
                else:
                    action = 4  # 已在充电站，执行充电
            
            # 更新通信状态为危险电量
            if self.enable_communication:
                self.communication_data[agent_id]['status'] = 'critical_battery'
        
        # 处理动作
        if action == 4:  # 充电
            # 检查是否在充电站
            at_charging_station = False
            for station in self.charging_stations:
                if np.array_equal(self.uav_positions[agent_id], station):
                    at_charging_station = True
                    break
            
            if at_charging_station:
                # 在充电站充电
                old_battery = self.uav_battery[agent_id]
                self.uav_battery[agent_id] = min(self.max_battery, self.uav_battery[agent_id] + self.charging_rate)
                
                # 检查电池是否已满
                if old_battery >= self.max_battery:
                    # 电池已满但仍在充电站充电，给予惩罚
                    reward -= 0.5
                    info['battery_full_charging'] = True
                else:
                    # 充电奖励 - 小的正奖励
                    reward += 0.1 * (self.uav_battery[agent_id] - old_battery) / self.charging_rate
                    
                    # 电量低时充电给予额外奖励
                    if old_battery < self.low_battery_threshold:
                        reward += 0.1 * (self.uav_battery[agent_id] - old_battery) / self.charging_rate
                        
                        # 电量低时充电给予额外奖励
                        if old_battery < self.low_battery_threshold:
                            reward += 0.5  # 低电量时充电的额外奖励
                    
                    # 更新通信状态为充电中
                    if self.enable_communication:
                        self.communication_data[agent_id]['status'] = 'charging'
            else:
                # 不在充电站但尝试充电 - 小的负奖励
                reward -= 0.5
        else:  # 移动
            # 检查电量
            if self.uav_battery[agent_id] <= 0:
                # 电量耗尽，无法移动
                reward -= 1.0  # 电量耗尽的惩罚
                info['battery_depleted'] = True
                return reward, done, info
            
            # 根据动作更新位置
            hit_boundary = False
            if action == 0:  # 上
                if x > 0:
                    new_x = x - 1
                else:
                    hit_boundary = True
            elif action == 1:  # 右
                if y < self.grid_size - 1:
                    new_y = y + 1
                else:
                    hit_boundary = True
            elif action == 2:  # 下
                if x < self.grid_size - 1:
                    new_x = x + 1
                else:
                    hit_boundary = True
            elif action == 3:  # 左
                if y > 0:
                    new_y = y - 1
                else:
                    hit_boundary = True
            
            # 处理边界惩罚
            if hit_boundary:
                reward -= self.boundary_penalty * 2
                info['hit_boundary'] = True
            else:
                # 检查是否撞到障碍物
                if self.obstacle_grid[new_x, new_y] == 1:
                    # 撞到障碍物，不移动，给予惩罚
                    reward -= self.boundary_penalty
                    info['hit_obstacle'] = True
                    new_x, new_y = x, y  # 恢复原位置
                else:
                    # 检查是否重复走自己的路径 - 增强自我路径重复惩罚
                    if self.agent_coverage_maps[agent_id][new_x, new_y] == 1:
                        # 计算上次访问这个位置的时间
                        last_visit_time = 0
                        for t, pos in enumerate(self.path_history[agent_id]):
                            if pos == (new_x, new_y):
                                last_visit_time = t
                        time_since_last_visit = len(self.path_history[agent_id]) - last_visit_time
                        
                        # 如果最近访问过，惩罚更大
                        if time_since_last_visit < 20:
                            self_path_penalty = self.self_path_penalty * (20 - time_since_last_visit) / 20
                            reward -= self_path_penalty
                            info['self_path_repeat'] = True
                            info['self_path_penalty'] = float(self_path_penalty)
                    
                    # 更新位置
                    self.uav_positions[agent_id] = np.array([new_x, new_y])
                    
                    # 记录路径历史
                    self.path_history[agent_id].append((new_x, new_y))
                    
                    # 消耗电量
                    self.uav_battery[agent_id] -= self.battery_consumption
                    
                    # 检查是否覆盖了新的单元格
                    # 奖励归一化 - 确保所有奖励在相似的尺度上
                    # 覆盖新区域的奖励
                    if self.grid[new_x, new_y] == 0:
                        self.grid[new_x, new_y] = 1
                        self.covered_cells += 1
                        # 使用固定奖励而不是动态计算
                        coverage_reward = 1.0
                        reward += coverage_reward
                        # 记录覆盖信息
                        self.recent_coverage[new_x, new_y] = agent_id + 1
                        self.coverage_time[new_x, new_y] = self.current_step
                        # 更新全局未覆盖地图
                        self.uncovered_map[new_x, new_y] = 0
                    else:
                        # 重复覆盖的处理
                        last_agent = self.recent_coverage[new_x, new_y] - 1  # 转换回0-based索引
                        time_since_coverage = self.current_step - self.coverage_time[new_x, new_y]
                        
                        # 如果是另一个智能体最近覆盖的，且时间较近，给予更大的惩罚
                        if last_agent != agent_id and time_since_coverage < 10:
                            # 增加重复覆盖其他智能体路径的惩罚
                            duplicate_penalty = self.duplicate_coverage_penalty * 1.5 * (10 - time_since_coverage) / 10
                            reward -= duplicate_penalty
                            info['duplicate_coverage'] = True
                            info['duplicate_penalty'] = float(duplicate_penalty)
                        else:
                            # 增加重复覆盖自己路径的惩罚
                            reward -= 0.3  # 从0.1增加到0.3
                        
                        # 更新覆盖信息
                        self.recent_coverage[new_x, new_y] = agent_id + 1
                        self.coverage_time[new_x, new_y] = self.current_step
                    
                    # 添加：朝向未覆盖区域移动的奖励
                    # 检查周围8个方向是否有未覆盖区域
                    surrounding_uncovered = 0
                    directions = [(-1,0), (0,1), (1,0), (0,-1), (-1,1), (1,1), (1,-1), (-1,-1)]
                    for dx, dy in directions:
                        nx, ny = new_x + dx, new_y + dy
                        if 0 <= nx < self.grid_size and 0 <= ny < self.grid_size:
                            if self.uncovered_map[nx, ny] == 1:
                                surrounding_uncovered += 1
                    
                    # 如果周围有未覆盖区域，给予额外奖励
                    if surrounding_uncovered > 0:
                        exploration_reward = 0.2 * surrounding_uncovered / 8
                        reward += exploration_reward
                        info['exploration_reward'] = float(exploration_reward)
                    
                    # 添加：距离最近未覆盖区域的奖励
                    nearest_uncovered = self._find_nearest_uncovered(agent_id)
                    if nearest_uncovered is not None:
                        old_dist = abs(x - nearest_uncovered[0]) + abs(y - nearest_uncovered[1])
                        new_dist = abs(new_x - nearest_uncovered[0]) + abs(new_y - nearest_uncovered[1])
                        
                        # 如果靠近了未覆盖区域，给予奖励
                        if new_dist < old_dist:
                            approach_uncovered_reward = 0.3
                            reward += approach_uncovered_reward
                            info['approaching_uncovered'] = True
                        # 如果远离了未覆盖区域，给予惩罚
                        elif new_dist > old_dist:
                            retreat_uncovered_penalty = 0.2
                            reward -= retreat_uncovered_penalty
                            info['retreating_from_uncovered'] = True
                    
                    # 更新个人覆盖地图 - 无论是否为新覆盖
                    self.agent_coverage_maps[agent_id][new_x, new_y] = 1
            
            # 电量低的处理
            if self.uav_battery[agent_id] < self.low_battery_threshold:
                # 更新通信状态为低电量
                if self.enable_communication:
                    self.communication_data[agent_id]['status'] = 'low_battery'
                
                # 计算与最近充电站的距离
                new_min_dist = float('inf')
                for station in self.charging_stations:
                    dist = np.abs(new_x - station[0]) + np.abs(new_y - station[1])  # 曼哈顿距离
                    new_min_dist = min(new_min_dist, dist)
                
                # 电量低且距离充电站远的惩罚
                battery_penalty = (self.low_battery_threshold - self.uav_battery[agent_id]) * 0.1 * new_min_dist / self.grid_size
                reward -= battery_penalty
                
                # 如果移动后距离充电站更近，给予奖励
                if new_min_dist < min_dist:
                    # 靠近充电站的奖励与电量成反比
                    approach_reward = 0.2 * (self.low_battery_threshold - self.uav_battery[agent_id]) / self.low_battery_threshold
                    reward += approach_reward
                    info['approaching_station'] = True
                # 如果移动后距离充电站更远，给予惩罚
                elif new_min_dist > min_dist and self.uav_battery[agent_id] < self.critical_battery_threshold:
                    # 远离充电站的惩罚与电量成反比
                    retreat_penalty = 0.3 * (self.critical_battery_threshold - self.uav_battery[agent_id]) / self.critical_battery_threshold
                    reward -= retreat_penalty
                    info['retreating_from_station'] = True
            else:
                # 电量正常
                if self.enable_communication:
                    self.communication_data[agent_id]['status'] = 'normal'
        
        # 更新通信数据
        if self.enable_communication:
            self.communication_data[agent_id]['position'] = self.uav_positions[agent_id].copy()
            self.communication_data[agent_id]['battery'] = float(self.uav_battery[agent_id])
            
            # 防震荡：目标更新策略改进
            # 1. 减少目标更新频率，避免频繁切换目标
            # 2. 只有在特定条件下才更新目标
            
            # 获取当前目标
            current_target = self.communication_data[agent_id].get('target', None)
            update_target = False
            
            # 判断是否需要更新目标
            if current_target is None:  # 没有目标时必须更新
                update_target = True
            elif self.uav_battery[agent_id] <= self.low_battery_threshold:  # 低电量时更新为充电站
                update_target = True
            elif current_target is not None and self.grid[current_target[0], current_target[1]] == 1:  # 当前目标已被覆盖
                update_target = True
            elif self.current_step % 10 == 0:  # 每10步强制更新一次，避免长时间卡在某个目标
                update_target = True
            
            # 根据条件更新目标
            if update_target:
                if self.uav_battery[agent_id] > self.low_battery_threshold:
                    # 寻找最近的未被其他智能体覆盖或目标的区域
                    target = self._find_best_uncovered_target(agent_id)
                    self.communication_data[agent_id]['target'] = target
                else:
                    # 低电量时目标是充电站
                    self.communication_data[agent_id]['target'] = closest_station
                
            # 主从架构特殊处理
            if self.leader_follower_mode:
                # 如果是从机，检查是否执行了主机的命令
                if agent_id != self.leader_id:
                    leader_command_target = self.communication_data[self.leader_id].get('command_target', None)
                    if leader_command_target is not None and isinstance(leader_command_target, dict) and str(agent_id) in leader_command_target:
                        assigned_target = leader_command_target[str(agent_id)]
                        if assigned_target is not None:
                            # 检查从机是否朝着分配的目标移动
                            if np.array_equal(self.communication_data[agent_id]['target'], assigned_target):
                                # 如果从机正在执行主机的命令，给予奖励
                                follower_cooperation_reward = 0.3
                                reward += follower_cooperation_reward
                                info['follower_cooperation'] = True
                
                # 如果是主机，检查从机是否执行了命令
                if agent_id == self.leader_id:
                    # 检查每个从机
                    leader_command_target = self.communication_data[agent_id].get('command_target', None)
                    if leader_command_target is not None and isinstance(leader_command_target, dict):
                        leader_reward = 0
                        followers_following_commands = 0
                        
                        for follower_id_str, target in leader_command_target.items():
                            follower_id = int(follower_id_str)
                            if target is not None and follower_id < self.agent_num:
                                # 检查从机的目标是否与主机分配的一致
                                if np.array_equal(self.communication_data[follower_id]['target'], target):
                                    followers_following_commands += 1
                        
                        # 根据执行命令的从机数量给予奖励
                        if followers_following_commands > 0:
                            leader_reward = 0.2 * followers_following_commands
                            reward += leader_reward
                            info['leader_command_followed'] = followers_following_commands
        
        # 更新信息
        info['position'] = self.uav_positions[agent_id].copy()
        info['battery'] = float(self.uav_battery[agent_id])
        info['coverage'] = float(self.covered_cells / self.total_cells)
        info['battery_status'] = 'normal'
        
        # 添加电量状态信息
        if self.uav_battery[agent_id] <= self.critical_battery_threshold:
            info['battery_status'] = 'critical'
        elif self.uav_battery[agent_id] <= self.low_battery_threshold:
            info['battery_status'] = 'low'
        
        return reward, done, info
    
    def _get_observation(self, agent_id):
        """
        Build the observation vector for the given agent.

        Args:
            agent_id: index of the agent to observe for.

        Returns:
            np.ndarray of shape (self.obs_dim,).

        NOTE(review): the per-other-agent communication section writes 8
        values (position 2, battery 1, target 2, status 1, next_action 1,
        role 1), while the layout comment in __init__ mentions 7 — confirm
        against _calculate_observation_dim.
        """
        x, y = self.uav_positions[agent_id]

        # Allocate the full observation vector.
        obs = np.zeros(self.obs_dim)

        # 1-2: position (normalized to [0, 1]).
        obs[0] = x / (self.grid_size - 1)
        obs[1] = y / (self.grid_size - 1)

        # 3: battery level (normalized).
        obs[2] = self.uav_battery[agent_id] / self.max_battery

        # 4-11: coverage state of the 8 surrounding cells.
        idx = 3
        directions = [(-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1)]
        for dx, dy in directions:
            nx, ny = x + dx, y + dy
            if 0 <= nx < self.grid_size and 0 <= ny < self.grid_size:
                # 1 if the cell is covered or an obstacle, else 0.
                if self.grid[nx, ny] == 1 or self.obstacle_grid[nx, ny] == 1:
                    obs[idx] = 1
            else:
                # Out-of-bounds cells count as covered.
                obs[idx] = 1
            idx += 1

        # 12-13: nearest charging-station position (normalized).
        min_dist = float('inf')
        closest_station = None
        for station in self.charging_stations:
            dist = abs(x - station[0]) + abs(y - station[1])  # Manhattan distance
            if dist < min_dist:
                min_dist = dist
                closest_station = station

        if closest_station is not None:
            obs[idx] = closest_station[0] / (self.grid_size - 1)
            obs[idx+1] = closest_station[1] / (self.grid_size - 1)
        idx += 2

        # 14: current global coverage ratio.
        obs[idx] = self.covered_cells / self.total_cells
        idx += 1

        # 15: whether the agent sits on a charging station.
        at_charging_station = False
        for station in self.charging_stations:
            if np.array_equal(self.uav_positions[agent_id], station):
                at_charging_station = True
                break
        obs[idx] = 1 if at_charging_station else 0
        idx += 1

        # 16: obstacle flag at the agent's own cell.
        obs[idx] = self.obstacle_grid[x, y]
        idx += 1

        # 17: battery state (0: normal, 0.5: low, 1: critical).
        if self.uav_battery[agent_id] <= self.critical_battery_threshold:
            obs[idx] = 1.0  # critical
        elif self.uav_battery[agent_id] <= self.low_battery_threshold:
            obs[idx] = 0.5  # low
        else:
            obs[idx] = 0.0  # normal
        idx += 1

        # 18: distance to the nearest charging station (normalized).
        obs[idx] = min_dist / (2 * self.grid_size)  # scaled by the maximum Manhattan distance
        idx += 1

        # 19: share of cells covered by the other agents.
        other_coverage = 0

        # Leader/follower bookkeeping: refresh this agent's role flag.
        # NOTE(review): mutating communication_data inside an observation
        # getter is a side effect — confirm this is intended.
        is_leader = 1 if agent_id == self.leader_id else 0
        self.communication_data[agent_id]['role'] = 'leader' if is_leader == 1 else 'follower'
        for i in range(self.agent_num):
            if i != agent_id:
                other_coverage += np.sum(self.agent_coverage_maps[i])
        obs[idx] = other_coverage / self.total_cells
        idx += 1

        # 20: ratio of still-uncovered cells.
        uncovered_count = np.sum(self.uncovered_map)
        obs[idx] = uncovered_count / self.total_cells
        idx += 1

        # 21: ratio of cells covered by this agent itself.
        self_coverage = np.sum(self.agent_coverage_maps[agent_id])
        obs[idx] = self_coverage / self.total_cells
        idx += 1

        # 22-29: uncovered flags for the 8 surrounding cells.
        for dx, dy in directions:
            nx, ny = x + dx, y + dy
            if 0 <= nx < self.grid_size and 0 <= ny < self.grid_size:
                obs[idx] = self.uncovered_map[nx, ny]
            else:
                obs[idx] = 0  # out-of-bounds counts as covered
            idx += 1

        # 30-37: own-coverage flags for the 8 surrounding cells.
        for dx, dy in directions:
            nx, ny = x + dx, y + dy
            if 0 <= nx < self.grid_size and 0 <= ny < self.grid_size:
                obs[idx] = self.agent_coverage_maps[agent_id][nx, ny]
            else:
                obs[idx] = 0  # out-of-bounds counts as uncovered
            idx += 1

        # 38: normalized step count.
        obs[idx] = self.current_step / self.max_steps
        idx += 1

        # Role flag (1): leader or follower.
        obs[idx] = 1.0 if agent_id == self.leader_id else 0.0
        idx += 1

        # Communication info from every other agent (8 values each).
        if self.enable_communication:
            for i in range(self.agent_num):
                if i != agent_id:
                    # Other agent's position (2).
                    if self.communication_data[i]['position'] is not None:
                        pos = self.communication_data[i]['position']
                        obs[idx] = pos[0] / (self.grid_size - 1)
                        obs[idx+1] = pos[1] / (self.grid_size - 1)
                    idx += 2

                    # Other agent's battery (1).
                    if self.communication_data[i]['battery'] is not None:
                        obs[idx] = self.communication_data[i]['battery'] / self.max_battery
                    idx += 1

                    # Other agent's target position (2).
                    if self.communication_data[i]['target'] is not None:
                        target = self.communication_data[i]['target']
                        obs[idx] = target[0] / (self.grid_size - 1)
                        obs[idx+1] = target[1] / (self.grid_size - 1)
                    idx += 2

                    # Other agent's status (1), encoded on [0, 1].
                    status = self.communication_data[i]['status']
                    if status == 'normal':
                        obs[idx] = 0.0
                    elif status == 'low_battery':
                        obs[idx] = 0.5
                    elif status == 'critical_battery':
                        obs[idx] = 0.75
                    elif status == 'charging':
                        obs[idx] = 1.0
                    idx += 1

                    # Other agent's announced next action (1).
                    if self.communication_data[i]['next_action'] is not None:
                        obs[idx] = self.communication_data[i]['next_action'] / (self.action_dim - 1)
                    idx += 1

                    # Other agent's role (1): 1 = leader, 0 = follower.
                    role = self.communication_data[i]['role']
                    obs[idx] = 1.0 if role == 'leader' else 0.0
                    idx += 1

                    # Followers additionally pull commands from the leader.
                    if agent_id != self.leader_id and i == self.leader_id and self.communication_data[i]['command_target'] is not None:
                        # Leader-assigned target positions (only followers read this).
                        target = self.communication_data[i]['command_target']
                        # Only when the leader addressed this particular follower.
                        if isinstance(target, dict) and str(agent_id) in target:
                            specific_target = target[str(agent_id)]
                            if specific_target is not None:
                                # Adopt the leader's target.
                                self.communication_data[agent_id]['target'] = specific_target

        # Refresh target / path planning.
        # NOTE(review): the return value is discarded here; this call only
        # matters for whatever side effects the utility performs — confirm intent.
        self._find_best_uncovered_target(agent_id)

        # Return the assembled observation.
        return obs
        
    def _find_nearest_uncovered(self, agent_id):
        """
        Find the uncovered cell nearest to the given UAV.

        Delegates to UAV_CPP_Utils; see env_uav_cpp_util for the search logic.
        """
        return UAV_CPP_Utils._find_nearest_uncovered(self, agent_id)

    def _plan_path_to_target(self, agent_id, target_pos):
        """
        Plan a path from the UAV's current cell to target_pos using A*.

        Args:
            agent_id: UAV id
            target_pos: target cell (x, y)

        Returns:
            next_action: the next move to take (0-up, 1-right, 2-down, 3-left)

        Delegates to UAV_CPP_Utils; see env_uav_cpp_util for the algorithm.
        """
        return UAV_CPP_Utils._plan_path_to_target(self, agent_id, target_pos)
    
    def _find_best_uncovered_target(self, agent_id):
        """
        Pick the best uncovered target cell, taking the other agents'
        targets into account.

        Delegates to UAV_CPP_Utils.
        """
        return UAV_CPP_Utils._find_best_uncovered_target(self, agent_id)
        
    def _leader_assign_tasks(self):
        """
        Leader assigns tasks to the followers (improved version),
        balancing workload, battery state and area partitioning.

        Delegates to UAV_CPP_Utils; returns nothing.
        """
        UAV_CPP_Utils._leader_assign_tasks(self)

    def _calculate_observation_dim(self, agent_num=None):
        """
        Compute the observation-space dimensionality.

        Args:
            agent_num: optional agent-count override; None presumably
                falls back to self.agent_num — confirm in env_uav_cpp_util.

        Delegates to UAV_CPP_Utils.
        """
        return UAV_CPP_Utils._calculate_observation_dim(self, agent_num)
        
    def set_leader_follower_mode(self, enable=True, leader_id=0):
        """
        Enable or disable the leader/follower architecture.

        Args:
            enable: whether the leader/follower mode is active
            leader_id: index of the agent acting as leader
        """
        self.leader_follower_mode = enable
        self.leader_id = leader_id

        # Refresh the role field of every agent's communication record.
        for agent in range(self.agent_num):
            if agent in self.communication_data:
                role = 'leader' if agent == self.leader_id else 'follower'
                self.communication_data[agent]['role'] = role

        print(f"主从模式已{'启用' if enable else '禁用'}，主机ID: {leader_id}")
    
    def _generate_charging_stations(self, required_stations):
        """
        Generate charging-station positions for the requested count.

        Args:
            required_stations: number of charging stations to place.

        Delegates to UAV_CPP_Utils; presumably mutates
        self.charging_stations in place — confirm in env_uav_cpp_util.
        """
        UAV_CPP_Utils._generate_charging_stations(self, required_stations)



class UAVCPPDiscreteActionEnv(object):
    """Discrete-action wrapper around UAVCPPEnvCore for multi-UAV coverage
    path planning (CPP), exposing MAPPO-compatible observation/action spaces.
    """

    def __init__(self, leader_follower_mode=True, leader_id=0):
        """Create the wrapped environment and build the gym spaces.

        Args:
            leader_follower_mode: whether to enable leader-follower mode.
            leader_id: index of the leader agent.
        """
        self.env = UAVCPPEnvCore()
        self.num_agent = self.env.agent_num
        self.signal_action_dim = self.env.action_dim

        # Configure leader-follower coordination on the core environment.
        self.env.set_leader_follower_mode(leader_follower_mode, leader_id)

        # Use the environment's precomputed observation dimension directly
        # instead of probing it with a throwaway observation.
        self.obs_dim = self.env.obs_dim

        # Per-agent discrete action spaces (fixed size, never rebuilt).
        self.action_space = [spaces.Discrete(self.signal_action_dim) for _ in range(self.num_agent)]
        self._rebuild_observation_spaces()

    def _rebuild_observation_spaces(self):
        """(Re)build per-agent and shared observation spaces from self.obs_dim.

        Single place for the space construction that was previously duplicated
        across __init__, reset and set_leader_follower_mode.
        """
        self.observation_space = [
            spaces.Box(low=-np.inf, high=np.inf, shape=(self.obs_dim,))
            for _ in range(self.num_agent)
        ]
        # Shared (centralized-critic) observation is the concatenation of
        # every agent's individual observation.
        self.share_observation_space = [
            spaces.Box(low=-np.inf, high=np.inf, shape=(self.obs_dim * self.num_agent,))
            for _ in range(self.num_agent)
        ]

    def reset(self):
        """Reset the environment; resync spaces if the obs dimension changed.

        Returns:
            The per-agent observation list from the core environment.
        """
        obs = self.env.reset()
        if len(obs) > 0 and len(obs[0]) != self.obs_dim:
            # Only rebuild when the dimension actually changed.
            self.obs_dim = len(obs[0])
            self._rebuild_observation_spaces()
        return obs

    def step(self, actions):
        """Advance one step with the given per-agent discrete actions.

        Args:
            actions: one discrete action index per agent.

        Returns:
            The (obs, rewards, dones, infos) tuple from the core environment.
        """
        return self.env.step(actions)

    def seed(self, seed=None):
        """Seed numpy's global RNG (the core environment draws from it).

        Args:
            seed: integer seed, or None for nondeterministic seeding.
        """
        np.random.seed(seed)

    def set_leader_follower_mode(self, enable=True, leader_id=0):
        """Toggle leader-follower mode and resync observation spaces.

        Args:
            enable: whether leader-follower mode is active.
            leader_id: index of the agent acting as leader.
        """
        self.env.set_leader_follower_mode(enable, leader_id)
        # The observation dimension may depend on the coordination mode.
        self.obs_dim = self.env.obs_dim
        self._rebuild_observation_spaces()

    def close(self):
        """No resources to release; present for API compatibility."""
        pass


if __name__ == "__main__":
    """
    Smoke-test the UAV CPP environment with random actions.
    """
    env = UAVCPPDiscreteActionEnv()
    print("观察空间:", env.observation_space)
    print("动作空间:", env.action_space)

    # Initial reset and a quick look at each agent's observation shape.
    obs = env.reset()
    print("初始观察:", [o.shape for o in obs])

    for i in range(10):
        # Sample one random discrete action index per agent.
        actions = [np.random.randint(0, env.signal_action_dim) for _ in range(env.num_agent)]

        next_obs, rewards, dones, infos = env.step(actions)
        print(f"步骤 {i+1}:")
        print("  观察形状:", [o.shape for o in next_obs])
        print("  奖励:", rewards)
        print("  完成:", dones)
        print("  信息:", infos)

        # Restart once every agent reports done.
        if all(dones):
            print("环境完成，重置...")
            obs = env.reset()

    print("测试完成!")
    env.close()


