import sys
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from pathlib import Path
import argparse
import json
from gym import spaces  # 添加这一行导入spaces

model_dir = Path("f:/curWork/light_mappo/results/UAV_CPP/uav_cpp/rmappo/uav_cpp_training_test/run10/models")
# model_dir = Path("f:/curWork/light_mappo/results/UAV_CPP/uav_cpp/rmappo/uav_cpp_testing/run8/models")

# 添加项目路径
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

# 导入环境和算法
from envs.env_uav_cpp import UAVCPPDiscreteActionEnv, UAVCPPEnvCore
from algorithms.algorithm.r_actor_critic import R_Actor


class Args:
    """Minimal stand-in for the training-time argument namespace.

    Holds the hyperparameters that ``R_Actor`` (and its MLP/RNN sub-modules)
    read as attributes.  Values must match those used during training so the
    saved state dicts load into identically shaped networks.
    """

    def __init__(self):
        self.hidden_size = 64
        self.gain = 0.01
        self.use_orthogonal = True
        self.use_policy_active_masks = True
        # Recurrent policy enabled: the saved checkpoints carry RNN weights.
        self.use_naive_recurrent_policy = False
        self.use_recurrent_policy = True
        self.recurrent_N = 1
        self.use_popart = False
        # Attributes required by MLPBase.
        self.use_feature_normalization = True
        self.use_ReLU = True
        self.stacked_frames = 1
        self.layer_N = 1
        # Miscellaneous attributes other algorithm components may look up.
        # (A duplicate `self.gain = 0.01` assignment was removed here.)
        self.use_centralized_V = True
        self.use_huber_loss = True
        self.huber_delta = 10.0
        self.use_max_grad_norm = True
        self.max_grad_norm = 10.0
        self.use_gae = True
        self.gae_lambda = 0.95
        self.use_proper_time_limits = False
        self.use_valuenorm = False
        self.use_value_active_masks = True
        self.use_eval = False


# Note: masks are created/updated with shape (num_agents, 1) in the functions below.

# Enable CJK font support so Chinese text in figures renders correctly.
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # default to SimHei so CJK labels render
matplotlib.rcParams['axes.unicode_minus'] = False  # render '-' correctly when saving figures

# Notes on testing in more complex environments (larger map, more obstacles):
#
# Method 1: modify environment parameters directly (no retraining required).
# Adjust the parameters in envs/env_uav_cpp.py and run this script to
# visualize the trained model in the new environment.
#
# Pros: fast, no retraining needed. Cons: the model may perform poorly in
# the more complex environment, since it was trained in the simple one.
def visualize_agent_behavior(test_episodes=5, save_animation=True, shared_policy=True, 
                            grid_size=15, obstacle_ratio=0.15, num_agents=2):
    """Run trained policies in a (possibly more complex) UAV-CPP environment
    and animate agent behavior with matplotlib.

    Args:
        test_episodes: number of episodes to run before stopping the animation.
        save_animation: if True, collect frames and write them to a GIF after
            the animation window closes.
        shared_policy: if True load a single shared ``actor.pt``; otherwise use
            separate leader/follower checkpoints.
        grid_size: side length of the coverage grid.
        obstacle_ratio: fraction of cells occupied by obstacles.
        num_agents: number of UAVs.  The checkpoints were trained with 2
            agents, so observations are trimmed to the training-time width
            before entering the model.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Build the environment and reach into its core to reconfigure it.
    env = UAVCPPDiscreteActionEnv()
    env_core = env.env

    env_core.grid_size = grid_size
    env_core.obstacle_ratio = obstacle_ratio
    env_core.agent_num = num_agents

    # Re-initialize every per-agent data structure for the new agent count.
    env_core.uav_positions = np.zeros((num_agents, 2), dtype=int)
    env_core.uav_battery = np.ones(num_agents) * env_core.max_battery  # use max_battery, not a hard-coded 100
    env_core.path_history = [[] for _ in range(num_agents)]
    env_core.agent_coverage_maps = [np.zeros((env_core.grid_size, env_core.grid_size)) for _ in range(num_agents)]

    if num_agents > 2:
        # At least one charging station per two UAVs (minimum of 2).
        required_stations = max(2, num_agents // 2 + num_agents % 2)
        env_core.charging_stations_cnt = required_stations

    # Fresh communication bookkeeping, one entry per agent.
    env_core.communication_data = {}
    for i in range(num_agents):
        env_core.communication_data[i] = {
            'position': None,
            'battery': None,
            'target': None,
            'status': 'normal',
            'next_action': None,
            'role': 'follower' if i != 0 else 'leader',
            'command': None,
            'command_target': None,
            'assigned_area': None
        }

    # Agent 0 is always the leader.
    if num_agents > 0:
        env_core.communication_data[0]['role'] = 'leader'

    # Leader/follower coordination is always on for visualization.
    env_core.leader_follower_mode = True
    env_core.leader_id = 0

    # Recompute the observation dimension for the new configuration, then
    # reset so the core environment rebuilds its internal state.
    env_core.obs_dim = env_core._calculate_observation_dim()
    env_core.reset()

    # Rebuild the gym spaces to match the new agent count / obs dimension.
    env.observation_space = []
    env.action_space = []
    for i in range(num_agents):
        obs_dim = env_core.obs_dim
        env.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))
        env.action_space.append(spaces.Discrete(5))  # 5 discrete actions

    env.num_agent = num_agents

    args = Args()

    actors = []

    # The checkpoints were trained with 2 agents, so their input width may
    # differ from the new environment's.  Build a throwaway 2-agent env to
    # recover the training-time observation dimension; observations are
    # trimmed to that width at inference time.
    temp_env = UAVCPPDiscreteActionEnv()
    temp_env_core = temp_env.env
    temp_env_core.agent_num = 2  # original training configuration
    temp_env_core.obs_dim = temp_env_core._calculate_observation_dim()
    temp_obs_dim = temp_env_core.obs_dim

    print(f"原始观测空间维度: {temp_obs_dim}, 新观测空间维度: {env_core.obs_dim}")

    if shared_policy:
        # Shared policy: a single actor.pt used by every agent.
        actor_model_path = model_dir / "actor.pt"
        actor = R_Actor(args, spaces.Box(low=-np.inf, high=+np.inf, shape=(temp_obs_dim,), dtype=np.float32), env.action_space[0], device)
        actor.load_state_dict(torch.load(actor_model_path, map_location=device))
        actor.eval()
        actors = [actor] * num_agents
    else:
        # Leader/follower: two checkpoints, follower model reused for agents 1..N-1.
        leader_model_path = model_dir / "actor_agent0.pt"
        if not leader_model_path.exists():
            leader_model_path = model_dir / "actor.pt"

        follower_model_path = model_dir / "actor_agent1.pt"
        if not follower_model_path.exists():
            follower_model_path = model_dir / "actor.pt"

        print(f"加载主模型: {leader_model_path}")
        print(f"加载从模型: {follower_model_path}")

        leader_actor = R_Actor(args, spaces.Box(low=-np.inf, high=+np.inf, shape=(temp_obs_dim,), dtype=np.float32), env.action_space[0], device)
        leader_actor.load_state_dict(torch.load(leader_model_path, map_location=device))
        leader_actor.eval()

        follower_actor = R_Actor(args, spaces.Box(low=-np.inf, high=+np.inf, shape=(temp_obs_dim,), dtype=np.float32), env.action_space[0], device)
        follower_actor.load_state_dict(torch.load(follower_model_path, map_location=device))
        follower_actor.eval()

        actors = [leader_actor]
        for i in range(1, num_agents):
            actors.append(follower_actor)

    # Per-run statistics.
    episode_rewards = []
    episode_coverages = []
    episode_lengths = []
    current_episode = 0
    current_step = 0
    episode_reward = 0

    # Track every UAV's path for trail rendering.
    uav_paths = [[] for _ in range(env.num_agent)]

    obs = env.reset()

    # Record each UAV's starting position.
    for i in range(env.num_agent):
        uav_paths[i].append(env_core.uav_positions[i].copy())

    # Initial recurrent state / masks, one row per agent.
    rnn_states = torch.zeros(env.num_agent, 1, args.hidden_size).to(device)
    masks = torch.ones(env.num_agent, 1).to(device)

    fig, ax = plt.subplots(figsize=(10, 10))

    frames = []  # frames collected for the optional GIF

    def update(frame):
        nonlocal obs, current_episode, current_step, episode_reward, rnn_states, masks, uav_paths

        ax.clear()

        actions = []
        action_values = []  # action probabilities, kept for display purposes

        for i in range(env.num_agent):
            obs_tensor = torch.FloatTensor(obs[i]).unsqueeze(0).to(device)

            # Prefer an action already planned by the environment coordinator.
            planned_action = env_core.communication_data[i].get('next_action', None)

            if planned_action is not None:
                action = planned_action
                actions.append(action)
                # Still query the policy so the RNN state stays in sync and we
                # can show the action probabilities.
                with torch.no_grad():
                    # Trim the observation to the model's training-time width.
                    if obs_tensor.shape[1] > temp_obs_dim:
                        obs_tensor_trimmed = obs_tensor[:, :temp_obs_dim]
                    else:
                        obs_tensor_trimmed = obs_tensor

                    # BUGFIX: the actor returns (action, log_prob, rnn_state);
                    # the log-prob is the second element, not the first.
                    _, action_log_prob, new_rnn_state = actors[i](
                        obs_tensor_trimmed, 
                        rnn_states[i:i+1], 
                        masks[i:i+1]
                    )
                    action_prob = torch.exp(action_log_prob)
                    action_values.append(action_prob.cpu().numpy())
                    rnn_states[i:i+1] = new_rnn_state
            else:
                # Act from the policy network.
                with torch.no_grad():
                    # Trim the observation to the model's training-time width.
                    if obs_tensor.shape[1] > temp_obs_dim:
                        obs_tensor_trimmed = obs_tensor[:, :temp_obs_dim]
                    else:
                        obs_tensor_trimmed = obs_tensor

                    action, action_log_prob, new_rnn_state = actors[i](
                        obs_tensor_trimmed, 
                        rnn_states[i:i+1], 
                        masks[i:i+1]
                    )
                    action_prob = torch.exp(action_log_prob)

                    actions.append(action.squeeze().cpu().numpy())
                    action_values.append(action_prob.cpu().numpy())
                    rnn_states[i:i+1] = new_rnn_state

        obs, rewards, dones, infos = env.step(actions)

        # Record every UAV's new position.
        for i in range(env.num_agent):
            uav_paths[i].append(env_core.uav_positions[i].copy())

        # Zero the mask (resetting the RNN) for any agent whose episode ended.
        masks = torch.FloatTensor([[0.0] if done else [1.0] for done in dones]).to(device)

        current_step += 1
        episode_reward += np.mean(rewards)

        # --- Rendering ---------------------------------------------------
        # Coverage grid.
        grid = env_core.grid
        ax.imshow(grid.T, origin='lower', cmap='Blues', alpha=0.6)

        # Obstacles.
        obstacles = env_core.obstacle_grid
        for i in range(env_core.grid_size):
            for j in range(env_core.grid_size):
                if obstacles[i, j] == 1:
                    ax.add_patch(plt.Rectangle((i-0.5, j-0.5), 1, 1, fill=True, color='gray', alpha=0.7))

        # Charging stations.
        for station in env_core.charging_stations:
            ax.add_patch(plt.Rectangle((station[0]-0.4, station[1]-0.4), 0.8, 0.8, 
                                      fill=True, color='gold', alpha=0.7))
            ax.text(station[0], station[1], "⚡", fontsize=12, ha='center', va='center', color='black')

        # Each UAV's trail, in its own color, fading with age.
        path_colors = ['blue', 'red', 'green', 'purple', 'orange', 'cyan', 'magenta', 'yellow']
        for i in range(env.num_agent):
            if len(uav_paths[i]) > 1:  # need at least two points to draw
                path_array = np.array(uav_paths[i])
                for j in range(1, len(path_array)):
                    # Newer segments are more opaque.
                    alpha = min(1.0, 0.3 + 0.7 * j / len(path_array))
                    ax.plot(path_array[j-1:j+1, 0], path_array[j-1:j+1, 1], 
                           color=path_colors[i % len(path_colors)], alpha=alpha, linewidth=1.5)

        # UAVs plus battery bars and status annotations.
        for i, pos in enumerate(env_core.uav_positions):
            battery = env_core.uav_battery[i]
            battery_percent = battery / env_core.max_battery

            role = "Leader" if i == env_core.leader_id else "Follower"

            if i == env_core.leader_id:
                # Leader: triangle marker.
                ax.plot(pos[0], pos[1], '^', markersize=10, color=path_colors[i % len(path_colors)])
            else:
                # Follower: circle marker.
                ax.plot(pos[0], pos[1], 'o', markersize=8, color=path_colors[i % len(path_colors)])

            # Battery bar next to the UAV.
            bar_length = 0.8
            bar_height = 0.2
            bar_x = pos[0] - bar_length/2
            bar_y = pos[1] + 0.3

            # Background (gray).
            ax.add_patch(plt.Rectangle((bar_x, bar_y), bar_length, bar_height, 
                                      fill=True, color='lightgray', alpha=0.7))

            # Foreground, green (full) shading to red (empty).
            bar_color = (1-battery_percent, battery_percent, 0)
            ax.add_patch(plt.Rectangle((bar_x, bar_y), bar_length * battery_percent, bar_height, 
                                      fill=True, color=bar_color, alpha=0.9))

            # Role/index label and battery reading.
            ax.text(pos[0], pos[1] - 0.4, f"{role}{i}", fontsize=8, 
                   color=path_colors[i % len(path_colors)], ha='center', weight='bold')
            ax.text(pos[0], pos[1] + 0.6, f"{battery:.1f}%", fontsize=7, 
                   ha='center', color='black')

            # Communication status indicators.
            if hasattr(env_core, 'communication_data') and env_core.enable_communication:
                status = env_core.communication_data[i]['status']
                target = env_core.communication_data[i]['target']
                next_action = env_core.communication_data[i].get('next_action', None)

                status_text = ""
                if status == 'low_battery':
                    status_text = "Low Battery"
                elif status == 'critical_battery':
                    status_text = "Critical Battery"
                elif status == 'charging':
                    status_text = "Charging"

                if status_text:
                    ax.text(pos[0], pos[1] - 0.8, status_text, fontsize=7,
                           ha='center', color='purple')

                # Dashed line from the UAV to its assigned target, if any.
                if target is not None:
                    ax.plot([pos[0], target[0]], [pos[1], target[1]], 
                           color=path_colors[i % len(path_colors)], linestyle='--', linewidth=0.8, alpha=0.5)
                    ax.plot(target[0], target[1], 'x', color=path_colors[i % len(path_colors)], markersize=5, alpha=0.7)
                    ax.text(target[0], target[1] + 0.3, f"T{i}({target[0]},{target[1]})", 
                           fontsize=6, color=path_colors[i % len(path_colors)], ha='center')

                # Arrow glyph for the planned next action.
                if next_action is not None:
                    action_text = ["↑", "→", "↓", "←", "⚡"][next_action]
                    ax.text(pos[0] + 0.4, pos[1], action_text, fontsize=10, 
                           color=path_colors[i % len(path_colors)], ha='center', weight='bold')

            # Blinking low-battery warning below 20%.
            if battery_percent < 0.2:
                # Blink: show only on even steps.
                if current_step % 2 == 0:
                    # English text to avoid font issues.
                    ax.text(pos[0], pos[1] + 0.8, "! Low Battery !", fontsize=8, 
                           ha='center', color='red', weight='bold')

                    circle = plt.Circle((pos[0], pos[1]), 0.6, fill=False, 
                                       color='red', linestyle='--', linewidth=1.5)
                    ax.add_patch(circle)

        # Current coverage percentage.
        coverage = env_core.covered_cells / env_core.total_cells * 100

        # English title to avoid font issues.
        ax.set_title(f"Episode: {current_episode+1}/{test_episodes}, Step: {current_step}\n"
                    f"Coverage: {coverage:.2f}%, Reward: {np.mean(rewards):.2f}, Total: {episode_reward:.2f}")

        ax.set_xlim(-0.5, env_core.grid_size-0.5)
        ax.set_ylim(-0.5, env_core.grid_size-0.5)

        # Capture every 10th step as a GIF frame.
        if save_animation and current_step % 10 == 0:
            try:
                from io import BytesIO
                buf = BytesIO()
                # BUGFIX: save as PNG — 'rgba' emits a raw pixel buffer that
                # PIL.Image.open cannot identify, so no frame was ever saved.
                fig.savefig(buf, format='png', dpi=80)
                buf.seek(0)
                from PIL import Image
                img = Image.open(buf)
                img_rgb = img.convert('RGB')
                frames.append(np.array(img_rgb))
                print(f"\r保存帧: {len(frames)}", end="")
            except Exception as e:
                print(f"\r保存帧错误: {e}，跳过此帧", end="")

        # Episode finished: all agents done, or step budget exhausted.
        if np.all(dones) or current_step >= env_core.max_steps:
            episode_rewards.append(episode_reward)
            episode_coverages.append(coverage)
            episode_lengths.append(current_step)

            print(f"\n回合 {current_episode+1} 完成:")
            print(f"  总奖励: {episode_reward:.2f}")
            print(f"  最终覆盖率: {coverage:.2f}%")
            print(f"  回合长度: {current_step}")

            current_episode += 1

            # All requested episodes done: print the summary and stop.
            if current_episode >= test_episodes:
                print("\n测试结果统计:")
                print(f"  平均总奖励: {np.mean(episode_rewards):.2f} ± {np.std(episode_rewards):.2f}")
                print(f"  平均覆盖率: {np.mean(episode_coverages):.2f}% ± {np.std(episode_coverages):.2f}%")
                print(f"  平均回合长度: {np.mean(episode_lengths):.2f} ± {np.std(episode_lengths):.2f}")

                ani.event_source.stop()
                return

            # Reset trackers and the environment for the next episode.
            # BUGFIX: the original reset the environment twice here, which
            # discarded the start positions recorded in between.
            uav_paths = [[] for _ in range(env.num_agent)]
            obs = env.reset()
            for i in range(env.num_agent):
                uav_paths[i].append(env_core.uav_positions[i].copy())
            current_step = 0
            episode_reward = 0
            # Fresh RNN state / masks for the new episode.
            rnn_states = torch.zeros(env.num_agent, 1, args.hidden_size).to(device)
            masks = torch.ones(env.num_agent, 1).to(device)

    # Drive the animation; update() stops it once all episodes have run.
    ani = FuncAnimation(fig, update, frames=test_episodes * env_core.max_steps, interval=100, repeat=False)

    # BUGFIX: show the animation FIRST — frames are collected inside update(),
    # so the GIF can only be written after the animation has actually run
    # (the original tried to save before plt.show(), when `frames` was empty).
    plt.show()

    if save_animation:
        try:
            if len(frames) > 0:
                print("\n正在保存动画为GIF文件...")
                import imageio
                # Downscale frames to halve the GIF size.
                resized_frames = []
                for frame in frames:
                    from PIL import Image
                    img = Image.fromarray(frame)
                    img_resized = img.resize((img.width // 2, img.height // 2), Image.LANCZOS)
                    resized_frames.append(np.array(img_resized))

                # Low frame rate keeps the file small.
                imageio.mimsave('uav_cpp_behavior.gif', resized_frames, fps=3)
                print("动画已保存为 'uav_cpp_behavior.gif'")
            else:
                print("\n没有收集到帧，无法保存GIF。请检查是否正确执行了动画更新函数。")
        except Exception as e:
            print(f"\n保存动画时出错: {e}")

def test_model_without_visualization(test_episodes=10, shared_policy=True,
                                    grid_size=10, obstacle_ratio=0.1,
                                    num_agents=2):
    """Evaluate trained policies without rendering and print summary stats.

    Args:
        test_episodes: number of evaluation episodes.
        shared_policy: if True load a single shared ``actor.pt``; otherwise use
            separate leader/follower checkpoints.
        grid_size: side length of the coverage grid.
        obstacle_ratio: fraction of cells occupied by obstacles.
        num_agents: number of UAVs.  Observations are trimmed to the
            training-time width (2-agent configuration) before inference.

    Returns:
        Tuple of (mean episode reward, mean coverage percentage).
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Build the environment and reconfigure its core.
    env = UAVCPPDiscreteActionEnv()
    env_core = env.env

    env_core.grid_size = grid_size
    env_core.obstacle_ratio = obstacle_ratio
    env_core.agent_num = num_agents

    # Re-initialize per-agent state for the new agent count.
    env_core.uav_positions = np.zeros((num_agents, 2), dtype=int)
    env_core.uav_battery = np.ones(num_agents) * env_core.max_battery
    env_core.path_history = [[] for _ in range(num_agents)]
    env_core.agent_coverage_maps = [np.zeros((env_core.grid_size, env_core.grid_size)) for _ in range(num_agents)]

    # Fresh communication bookkeeping.
    env_core.communication_data = {}
    for i in range(num_agents):
        env_core.communication_data[i] = {
            'position': None,
            'battery': None,
            'target': None,
            'status': 'normal',
            'next_action': None,
            'role': 'follower'  # default role; agent 0 is promoted below
        }

    # Agent 0 is always the leader.
    if num_agents > 0:
        env_core.communication_data[0]['role'] = 'leader'

    env_core.leader_follower_mode = True
    env_core.leader_id = 0

    # Recompute the observation dimension and reset the core environment.
    env_core.obs_dim = env_core._calculate_observation_dim()
    env_core.reset()

    # Rebuild the gym spaces for the new agent count / obs dimension.
    env.observation_space = []
    env.action_space = []
    for i in range(num_agents):
        obs_dim = env_core.obs_dim
        env.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))
        env.action_space.append(spaces.Discrete(5))  # 5 discrete actions

    env.num_agent = num_agents

    args = Args()

    # Throwaway 2-agent environment to recover the training-time observation
    # width; observations are trimmed to this length before inference.
    temp_env = UAVCPPDiscreteActionEnv()
    temp_env_core = temp_env.env
    temp_env_core.agent_num = 2  # original training configuration
    temp_env_core.obs_dim = temp_env_core._calculate_observation_dim()
    temp_obs_dim = temp_env_core.obs_dim

    print(f"原始观测空间维度: {temp_obs_dim}, 新观测空间维度: {env_core.obs_dim}")

    actors = []

    if shared_policy:
        # Shared policy: a single actor.pt for every agent.
        actor_model_path = model_dir / "actor.pt"
        actor = R_Actor(args, spaces.Box(low=-np.inf, high=+np.inf, shape=(temp_obs_dim,), dtype=np.float32), env.action_space[0], device)
        actor.load_state_dict(torch.load(actor_model_path, map_location=device))
        actor.eval()
        actors = [actor] * env.num_agent
    else:
        # Leader/follower checkpoints; follower model reused for agents 1..N-1.
        leader_model_path = model_dir / "actor_agent0.pt"
        if not leader_model_path.exists():
            leader_model_path = model_dir / "actor.pt"

        follower_model_path = model_dir / "actor_agent1.pt"
        if not follower_model_path.exists():
            follower_model_path = model_dir / "actor.pt"

        print(f"加载主模型: {leader_model_path}")
        print(f"加载从模型: {follower_model_path}")

        leader_actor = R_Actor(args, spaces.Box(low=-np.inf, high=+np.inf, shape=(temp_obs_dim,), dtype=np.float32), env.action_space[0], device)
        leader_actor.load_state_dict(torch.load(leader_model_path, map_location=device))
        leader_actor.eval()

        follower_actor = R_Actor(args, spaces.Box(low=-np.inf, high=+np.inf, shape=(temp_obs_dim,), dtype=np.float32), env.action_space[0], device)
        follower_actor.load_state_dict(torch.load(follower_model_path, map_location=device))
        follower_actor.eval()

        actors = [leader_actor]
        for i in range(1, num_agents):
            actors.append(follower_actor)

    # Per-run statistics.
    episode_rewards = []
    episode_coverages = []
    episode_lengths = []

    for episode in range(test_episodes):
        obs = env.reset()
        episode_reward = 0
        step = 0
        done = False

        # Fresh recurrent state / masks, one row per agent.
        rnn_states = torch.zeros(env.num_agent, 1, args.hidden_size).to(device)
        masks = torch.ones(env.num_agent, 1).to(device)

        while not done and step < env_core.max_steps:
            actions = []
            for i in range(env.num_agent):
                obs_tensor = torch.FloatTensor(obs[i]).unsqueeze(0).to(device)

                # Trim the observation to the model's training-time width.
                if obs_tensor.shape[1] > temp_obs_dim:
                    obs_tensor_trimmed = obs_tensor[:, :temp_obs_dim]
                else:
                    obs_tensor_trimmed = obs_tensor

                # Query this agent's policy, threading through rnn_states/masks.
                with torch.no_grad():
                    action, _, new_rnn_state = actors[i](
                        obs_tensor_trimmed, 
                        rnn_states[i:i+1], 
                        masks[i:i+1]
                    )
                    actions.append(action.squeeze().cpu().numpy())
                    rnn_states[i:i+1] = new_rnn_state

            obs, rewards, dones, infos = env.step(actions)

            # Zero the mask (resetting the RNN) for agents whose episode ended.
            # (Comprehension variable renamed so it no longer shadows `done`.)
            masks = torch.FloatTensor([[0.0] if d else [1.0] for d in dones]).to(device)

            step += 1
            episode_reward += np.mean(rewards)

            done = np.all(dones)

        # Final coverage percentage for this episode.
        coverage = env_core.covered_cells / env_core.total_cells * 100

        episode_rewards.append(episode_reward)
        episode_coverages.append(coverage)
        episode_lengths.append(step)

        print(f"回合 {episode+1}/{test_episodes}:")
        print(f"  总奖励: {episode_reward:.2f}")
        print(f"  最终覆盖率: {coverage:.2f}%")
        print(f"  回合长度: {step}")

    print("\n测试结果统计:")
    print(f"  平均总奖励: {np.mean(episode_rewards):.2f} ± {np.std(episode_rewards):.2f}")
    print(f"  平均覆盖率: {np.mean(episode_coverages):.2f}% ± {np.std(episode_coverages):.2f}%")
    print(f"  平均回合长度: {np.mean(episode_lengths):.2f} ± {np.std(episode_lengths):.2f}")

    return np.mean(episode_rewards), np.mean(episode_coverages)

# Location of the file used to persist the last-used configuration.
def get_config_file_path():
    """Return the path of the JSON file storing the last-used settings."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return Path(script_dir) / "last_config.json"

# Persist the chosen configuration so the next run can offer to reuse it.
def save_config(config):
    """Write *config* as pretty-printed UTF-8 JSON next to this script."""
    target = get_config_file_path()
    with open(target, "w", encoding="utf-8") as handle:
        json.dump(config, handle, ensure_ascii=False, indent=4)
    print(f"配置已保存到 {target}")

# Load the configuration saved by a previous run, if any.
def load_config():
    """Return the previously saved configuration dict, or None."""
    config_file = get_config_file_path()
    if not config_file.exists():
        return None
    try:
        with open(config_file, "r", encoding="utf-8") as handle:
            return json.load(handle)
    except Exception as e:
        # Best effort: an unreadable/corrupt file just means "no saved config".
        print(f"加载配置文件失败: {e}")
    return None

if __name__ == "__main__":
    # Offer to reuse the configuration from the previous run, if one exists.
    last_config = load_config()
    use_last_config = False

    if last_config:
        print("\n发现上一次的配置:")
        print(f"测试模式: {'可视化测试' if last_config['test_mode'] == '1' else '快速测试'}")
        print(f"策略类型: {'共享策略' if last_config['shared_policy'] else '分离策略'}")
        print(f"环境复杂度: {last_config['grid_size']}x{last_config['grid_size']}网格, {int(last_config['obstacle_ratio']*100)}%障碍物")
        print(f"无人机数量: {last_config['num_agents']}个")
        if last_config['test_mode'] == '1':
            print(f"保存动画: {'是' if last_config['save_animation'] else '否'}")

        use_last_choice = input("\n是否使用上一次的配置? (y/n): ")
        use_last_config = use_last_choice.lower() == 'y'

    if use_last_config:
        # Reuse the saved configuration.
        choice = last_config['test_mode']
        shared_policy = last_config['shared_policy']
        grid_size = last_config['grid_size']
        obstacle_ratio = last_config['obstacle_ratio']
        num_agents = last_config['num_agents']
        save_animation = last_config.get('save_animation', True)
    else:
        # Interactive configuration: test mode.
        print("请选择测试模式:")
        print("1. 可视化测试 (较慢，但可以看到智能体行为)")
        print("2. 快速测试 (仅输出统计结果)")

        choice = input("请输入选项 (1 或 2): ")

        # Policy type.
        print("\n请选择策略类型:")
        print("1. 共享策略 (使用actor.pt)")
        print("2. 分离策略 (使用actor_agent0.pt, actor_agent1.pt等)")

        policy_choice = input("请输入选项 (1 或 2): ")
        shared_policy = policy_choice != "2"

        # Environment complexity.
        print("\n请选择环境复杂度:")
        print("1. 标准环境 (10x10网格, 10%障碍物)")
        print("2. 中等复杂 (15x15网格, 15%障碍物)")
        print("3. 高度复杂 (20x20网格, 20%障碍物)")

        env_choice = input("请输入选项 (1, 2 或 3): ")

        grid_size = 10
        obstacle_ratio = 0.1

        if env_choice == "2":
            grid_size = 15
            obstacle_ratio = 0.15
        elif env_choice == "3":
            grid_size = 20
            obstacle_ratio = 0.2

        # Number of UAVs.
        print("\n请选择无人机数量:")
        print("1. 标准配置 (2个无人机)")
        print("2. 扩展配置 (3个无人机)")
        print("3. 大规模配置 (5个无人机)")

        uav_choice = input("请输入选项 (1, 2 或 3): ")

        num_agents = 2
        if uav_choice == "2":
            num_agents = 3
        elif uav_choice == "3":
            num_agents = 5

        # GIF saving only applies to the visualized mode.
        save_animation = True
        if choice == "1":
            save_choice = input("\n是否保存动画为GIF? (y/n): ")
            save_animation = save_choice.lower() == 'y'

        # Persist this configuration for the next run.
        current_config = {
            'test_mode': choice,
            'shared_policy': shared_policy,
            'grid_size': grid_size,
            'obstacle_ratio': obstacle_ratio,
            'num_agents': num_agents,
            'save_animation': save_animation
        }
        save_config(current_config)

    # BUGFIX: mode "2" (fast test) previously fell through to the visualized
    # test — test_model_without_visualization was never called. Dispatch on
    # the selected mode instead.
    if choice == "2":
        test_model_without_visualization(test_episodes=10, shared_policy=shared_policy,
                                        grid_size=grid_size, obstacle_ratio=obstacle_ratio,
                                        num_agents=num_agents)
    else:
        visualize_agent_behavior(test_episodes=3, save_animation=save_animation, 
                                shared_policy=shared_policy,
                                grid_size=grid_size, obstacle_ratio=obstacle_ratio,
                                num_agents=num_agents)