import gymnasium as gym
import torch
import os
import csv
import argparse
import json
import datetime
import multiprocessing as mp
import sys
import signal
from math import ceil
from copy import deepcopy
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Categorical
from Hunt_Env.Hunt_env.envs.Hunt_world import HunterEnv
from Hunt_Env.Hunt_env.envs.decision.rule_based_decision import rule_based_decision
from ppo import PPOAgent, Memory, ActorCritic
from gymnasium.wrappers import RecordVideo
from gymnasium.wrappers import FlattenObservation
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="pygame")
# Global flag: set once a termination signal has been received.
# NOTE(review): worker processes also read this flag, but each process has its
# own copy of module state — the parent setting it does not propagate to
# already-spawned children unless they receive the signal themselves (e.g. via
# the process group under fork). Confirm the intended shutdown behavior.
exiting = False

# Global signal handler: request a graceful shutdown, then arm a hard deadline.
def signal_handler(signum, frame):
    # signum: signal number delivered; frame: current stack frame (unused).
    global exiting
    if not exiting:
        exiting = True
        print("\n检测到终止信号，正在清理资源...")
        # If cleanup takes longer than 5s, SIGALRM fires; with no SIGALRM
        # handler installed its default action terminates the process.
        # NOTE(review): signal.alarm is POSIX-only — unavailable on Windows.
        signal.alarm(5)

# Register the handler for both interactive interrupt and termination requests.
signal.signal(signal.SIGINT, signal_handler)  # Ctrl+C
signal.signal(signal.SIGTERM, signal_handler)  # kill / termination signal

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def process_observation(obs):
    """Flatten a (possibly nested) dict observation into one 1-D CPU tensor.

    Values are concatenated in the dict's iteration (insertion) order; nested
    dicts are flattened recursively.  Everything is kept on CPU so observation
    tensors never mismatch devices between trainer and worker processes.
    """
    parts = []
    for value in obs.values():
        if isinstance(value, dict):
            # Recurse into nested observation dicts.
            parts.append(process_observation(value))
        else:
            piece = torch.tensor(value, dtype=torch.float32, device='cpu')
            # Promote scalars to 1-D so concatenation works uniformly.
            if piece.dim() == 0:
                piece = piece.unsqueeze(0)
            parts.append(piece.reshape(-1))
    return torch.cat(parts)

class TrainingLogger:
    """Unified training logger writing to TensorBoard and/or a CSV file.

    A timestamped sub-directory is created under ``config['log_dir']`` so
    repeated runs never collide.  Both sinks can be toggled independently via
    ``enable_tensorboard`` / ``enable_csv`` in the config.
    """

    def __init__(self, config):
        # Pull options out of the config dict (with defaults where sensible).
        self.log_dir = config['log_dir']
        self.csv_file_name = config.get('csv_file', 'training_stats.csv')
        self.enable_tensorboard = config.get('enable_tensorboard', True)
        self.enable_csv = config.get('enable_csv', True)

        # Per-run directory named by start time.
        run_stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        self.log_dir = os.path.join(self.log_dir, f'train_{run_stamp}')
        os.makedirs(self.log_dir, exist_ok=True)

        # TensorBoard writer — only instantiated when enabled.
        self.tb_writer = SummaryWriter(self.log_dir) if self.enable_tensorboard else None

        # CSV column layout; the list order defines the header row.
        self.csv_file = None
        self.csv_fields = [
            'episode', 'avg_reward_hunter', 'avg_reward_escaper', 
            'total_loss', 'actor_surr_loss1', 'actor_surr_loss2', 'critic_value_loss', 'total_entropy',
            'test_avg_reward_hunter', 'test_avg_reward_escaper',
            'test_hunter_wins', 'test_escaper_wins', 'timestamp'
        ]

        if self.enable_csv:
            self.csv_file = os.path.join(self.log_dir, self.csv_file_name)
            # Create the CSV file and emit the header row up front.
            with open(self.csv_file, 'w', newline='') as f:
                csv.DictWriter(f, fieldnames=self.csv_fields).writeheader()

        # Rolling buffers backing accumulate/average statistics.
        self.stats_buffer = {}

        # Announce where everything will be written.
        print(f"日志目录: {self.log_dir}")
        if self.enable_tensorboard:
            print(f"TensorBoard日志已启用")
        if self.enable_csv:
            print(f"CSV文件: {self.csv_file}")

    def log_scalar(self, tag, value, step):
        """Write one scalar to TensorBoard (no-op when disabled)."""
        if self.enable_tensorboard and self.tb_writer:
            self.tb_writer.add_scalar(tag, value, step)

    def log_scalars(self, main_tag, tag_scalar_dict, step):
        """Write a group of related scalars under one TensorBoard main tag."""
        if self.enable_tensorboard and self.tb_writer:
            self.tb_writer.add_scalars(main_tag, tag_scalar_dict, step)

    def accumulate_stats(self, stats_key, value):
        """Append *value* to the buffer for *stats_key*, creating it if new."""
        self.stats_buffer.setdefault(stats_key, []).append(value)

    def get_average_stats(self, stats_key):
        """Return the mean of buffered values for *stats_key*; 0 when empty/missing."""
        values = self.stats_buffer.get(stats_key)
        if values:
            return sum(values) / len(values)
        return 0

    def clear_stats_buffer(self, stats_key=None):
        """Reset one named buffer, or every buffer when *stats_key* is None."""
        if stats_key is not None and stats_key in self.stats_buffer:
            self.stats_buffer[stats_key] = []
        elif stats_key is None:
            self.stats_buffer = {}

    def log_training_stats(self, episode, stats):
        """Append one row of training statistics to the CSV file.

        The *episode* argument is kept for interface compatibility; the row's
        episode value comes from ``stats['episode']``.
        """
        if not self.enable_csv:
            return
        # Stamp the row with wall-clock time.
        stats['timestamp'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        with open(self.csv_file, 'a', newline='') as f:
            csv.DictWriter(f, fieldnames=self.csv_fields).writerow(stats)

    def close(self):
        """Flush and close the TensorBoard writer, if one was created."""
        if self.tb_writer:
            self.tb_writer.close()

def test_agent(env, ppo_hunter, num_test_episodes=10, max_timesteps=100):
    """Evaluate the hunter agent against the rule-based escaper without rendering.

    Args:
        env: evaluation environment (rendering is temporarily disabled).
        ppo_hunter: trained PPO agent controlling agent_0 (the hunter).
        num_test_episodes: number of evaluation episodes to run.
        max_timesteps: step cap per episode.

    Returns:
        dict: 'avg_reward_hunter', 'avg_reward_escaper' (per-episode means),
        'hunter_wins', 'escaper_wins' (episode counts).
    """
    # Plain lists suffice: exactly num_test_episodes values are appended, so
    # the previous deque(maxlen=num_test_episodes) never evicted anything.
    test_rewards_hunter = []
    test_rewards_escaper = []

    hunter_wins = 0
    escaper_wins = 0

    # Disable rendering for speed; restored in the finally block below.
    original_render_mode = env.render_mode
    env.render_mode = None

    try:
        for episode in range(num_test_episodes):
            state = env.reset()
            state_hunter = process_observation(state['agent_0'])
            episode_reward_hunter = 0
            episode_reward_escaper = 0
            # Fix: initialize so the post-loop check below cannot hit an
            # unbound local when max_timesteps <= 0.
            done = False

            for t in range(max_timesteps):
                # Hunter acts via the learned policy; escaper via fixed rules.
                with torch.no_grad():
                    (action_hunter1, action_hunter2), _ = ppo_hunter.select_action(state_hunter)
                (action_escaper1, action_escaper2) = rule_based_decision(state['agent_1'], '逃脱者')

                # Convert action tensors to plain Python ints (on CPU).
                action_hunter1 = action_hunter1.cpu().numpy().item()
                action_hunter2 = action_hunter2.cpu().numpy().item()

                action = {'agent_0': [action_hunter1, action_hunter2], 'agent_1': [action_escaper1, action_escaper2]}
                state, _, done, _, info = env.step(action)
                state_hunter = process_observation(state['agent_0'])

                # Accumulate both agents' rewards for this episode.
                episode_reward_hunter += info['reward']['agent_0']
                episode_reward_escaper += info['reward']['agent_1']

                if done:
                    # Episode ended before the step cap -> hunter win;
                    # ending exactly on the last step counts for the escaper.
                    if t < max_timesteps - 1:
                        hunter_wins += 1
                    else:
                        escaper_wins += 1
                    break

            # Reaching the cap without termination is an escaper win.
            if not done:
                escaper_wins += 1

            test_rewards_hunter.append(episode_reward_hunter)
            test_rewards_escaper.append(episode_reward_escaper)
    finally:
        # Restore the environment's original render mode.
        env.render_mode = original_render_mode

    # Fix: avoid ZeroDivisionError when num_test_episodes == 0
    # (empty sums are 0, so the averages come out as 0.0).
    n = max(len(test_rewards_hunter), 1)
    avg_reward_hunter = sum(test_rewards_hunter) / n
    avg_reward_escaper = sum(test_rewards_escaper) / n

    return {
        'avg_reward_hunter': avg_reward_hunter,
        'avg_reward_escaper': avg_reward_escaper,
        'hunter_wins': hunter_wins,
        'escaper_wins': escaper_wins
    }

def load_config(config_file=None):
    """Assemble the training configuration.

    Hard-coded defaults are returned unchanged when *config_file* is None or
    does not exist; otherwise keys from the JSON file overlay the defaults.
    A failed load is reported and the defaults are kept.

    Args:
        config_file: optional path to a JSON configuration file.

    Returns:
        dict: the merged configuration.
    """
    # Baseline defaults for every tunable.
    config = {
        # Training
        'max_episodes': 150,
        'max_timesteps': 100,
        'buffer_size': 8192,
        'entropy_weight': 0.01,
        'load_agent_weight': "models/hunter_model_max_win_10.pth",

        # Logging
        'log_dir': 'logs',
        'csv_file': 'training_stats.csv',
        'enable_tensorboard': True,
        'enable_csv': True,
        'log_interval': 1,

        # Evaluation
        'test_interval': 1,
        'num_test_episodes': 10,

        # Video recording
        'record_video': False,
        'video_folder': 'videos',
        'video_interval': 50,
        # Multiprocessing
        'num_processes': 8,
    }

    # Overlay values from the JSON file, when one was given and exists.
    if config_file and os.path.exists(config_file):
        try:
            with open(config_file, 'r') as f:
                config.update(json.load(f))
            print(f"从配置文件加载参数: {config_file}")
        except Exception as e:
            print(f"加载配置文件失败: {e}")

    return config

def parse_arguments():
    """Build and run the command-line parser for the training script.

    Returns:
        argparse.Namespace: parsed options.  Unsupplied options are None,
        except --video-interval, which defaults to 50, and the store_true
        flags, which default to False.
    """
    parser = argparse.ArgumentParser(description='训练PPO猎手智能体')
    arg = parser.add_argument

    # Configuration file.
    arg('--config', type=str, help='配置文件路径')

    # Training options.
    arg('--max-episodes', type=int, help='最大训练回合数')
    arg('--max-timesteps', type=int, help='每回合最大步数')
    arg('--buffer-size', type=int, help='经验回放缓冲区大小')
    arg('--load-agent-weight', type=str, help='加载智能体权重文件路径')

    # Logging options.
    arg('--log-dir', type=str, help='日志目录')
    arg('--log-interval', type=int, help='日志记录间隔')
    arg('--disable-tensorboard', action='store_true', help='禁用TensorBoard记录')
    arg('--disable-csv', action='store_true', help='禁用CSV记录')

    # Evaluation options.
    arg('--test-interval', type=int, help='测试间隔')
    arg('--num-test-episodes', type=int, help='每次测试的回合数')

    # Video recording options.
    arg('--no-video', action='store_true', help='禁用视频记录')
    arg('--video-interval', type=int, default=50, help='视频记录间隔')

    # PPO algorithm options.
    arg('--entropy-weight', type=float, help='熵正则化权重，控制代理探索性 (推荐范围: 0.001-0.3)')

    # Multiprocessing options.
    arg('--num-processes', type=int, help='数据采集使用的进程数')

    return parser.parse_args()

def update_config_from_args(config, args):
    """Overlay explicitly-supplied command-line options onto *config* in place.

    Options the user left unset (None) are ignored, so config-file values
    survive.  The boolean disable/no-* flags can only switch features off.

    Args:
        config: configuration dict mutated in place.
        args: argparse.Namespace from parse_arguments().
    """
    # Value options, copied verbatim whenever the user supplied one.
    overrides = (
        ('max_episodes', args.max_episodes),
        ('max_timesteps', args.max_timesteps),
        ('buffer_size', args.buffer_size),
        ('load_agent_weight', args.load_agent_weight),
        ('log_dir', args.log_dir),
        ('log_interval', args.log_interval),
        ('test_interval', args.test_interval),
        ('num_test_episodes', args.num_test_episodes),
        ('video_interval', args.video_interval),
        ('entropy_weight', args.entropy_weight),
        ('num_processes', args.num_processes),
    )
    for key, supplied in overrides:
        if supplied is not None:
            config[key] = supplied

    # Feature switches: these flags only ever disable, never enable.
    if args.disable_tensorboard:
        config['enable_tensorboard'] = False
    if args.disable_csv:
        config['enable_csv'] = False
    if args.no_video:
        config['record_video'] = False

def collect_data_process(process_id, env_config, model_state_dict, buffer_size, max_timesteps, num_episodes):
    """Collect hunter rollout data in a worker process.

    Args:
        process_id: worker identifier (informational; -1 marks the
            single-process top-up pass started by main).
        env_config: environment options; 'render_mode' is honoured and
            defaults to 'rgb_array' (previously this argument was ignored).
        model_state_dict: dict holding the policy weights under key 'policy'.
        buffer_size: target buffer size (unused here — the caller sizes the
            collection via num_episodes; kept for interface compatibility).
        max_timesteps: per-episode step cap.
        num_episodes: number of full episodes to play.

    Returns:
        dict: 'process_id', 'states', 'actions', 'log_probs', 'rewards',
        'dones', 'total_steps'.
    """
    # Each worker builds its own environment and CPU-only policy copy.
    env = HunterEnv(render_mode=env_config.get('render_mode', 'rgb_array'))

    # Input/output dimensions for agent 0 come from the environment itself.
    input_dim, action_dim1, action_dim2 = env.unwrapped.get_agent_dims(0)

    # Only action selection is needed here, so a bare ActorCritic in eval
    # mode suffices (no full PPOAgent).
    policy = ActorCritic(input_dim, action_dim1, action_dim2).cpu()
    policy.load_state_dict(model_state_dict['policy'])
    policy.eval()

    collected_states = []
    collected_actions = []
    collected_log_probs = []
    collected_rewards = []
    collected_dones = []

    total_steps = 0
    for episode in range(num_episodes):
        # Bail out early on a shutdown request.
        # NOTE(review): 'exiting' is per-process module state; a parent-side
        # signal only affects workers if the handler also runs in them.
        if exiting:
            break

        state = env.reset()
        state_hunter = process_observation(state['agent_0'])
        episode_steps = 0

        while episode_steps < max_timesteps:
            if exiting:
                break

            # Sample both hunter action heads from the current policy.
            with torch.no_grad():
                action_probs1, action_probs2, _ = policy(state_hunter)
            dist1 = Categorical(action_probs1)
            dist2 = Categorical(action_probs2)
            action_hunter1 = dist1.sample()
            action_hunter2 = dist2.sample()
            log_prob_hunter1 = dist1.log_prob(action_hunter1)
            log_prob_hunter2 = dist2.log_prob(action_hunter2)
            (action_escaper1, action_escaper2) = rule_based_decision(state['agent_1'], '逃脱者')

            # Convert sampled action tensors to plain Python ints (CPU).
            action_hunter1 = action_hunter1.cpu().numpy().item()
            action_hunter2 = action_hunter2.cpu().numpy().item()

            action = {'agent_0': [action_hunter1, action_hunter2], 'agent_1': [action_escaper1, action_escaper2]}
            next_state, _, done, _, info = env.step(action)
            next_state_hunter = process_observation(next_state['agent_0'])

            # Store the transition; everything stays on CPU.
            collected_states.append(state_hunter.to('cpu'))
            collected_actions.append(torch.tensor([action_hunter1, action_hunter2], device='cpu'))
            # Fix: stack the two 0-dim log-prob tensors instead of rebuilding
            # a tensor from a list of tensors (deprecated pattern that warns
            # and copies). Values and dtype are unchanged.
            collected_log_probs.append(torch.stack((log_prob_hunter1, log_prob_hunter2)).to('cpu'))
            collected_rewards.append(info['reward']['agent_0'])
            collected_dones.append(done)

            total_steps += 1
            episode_steps += 1
            state_hunter = next_state_hunter
            # Fix: advance the full observation too. Previously 'state' was
            # never updated after env.step, so rule_based_decision fed the
            # escaper its *initial* observation for the whole episode
            # (test_agent correctly rebinds state each step).
            state = next_state

            if done:
                break

    env.close()

    return {
        'process_id': process_id,
        'states': collected_states,
        'actions': collected_actions,
        'log_probs': collected_log_probs,
        'rewards': collected_rewards,
        'dones': collected_dones,
        'total_steps': total_steps
    }

def main():
    """Entry point: configure, optionally BC-pretrain, then run PPO training.

    Each epoch: snapshot the policy on CPU, collect rollouts with a pool of
    worker processes (topping up single-process if short), merge the data into
    the agent's buffer at whole-game granularity, run one PPO update,
    periodically evaluate against the rule-based escaper, checkpoint the best
    models, and log statistics to TensorBoard/CSV.
    """
    # Imports used only inside main.
    from collections import deque
    import pickle
    import os
    
    # Parse command-line arguments.
    args = parse_arguments()
    
    # Load configuration (defaults, optionally overlaid by a JSON file).
    config = load_config(args.config)
    
    # Command-line flags override the configuration file.
    update_config_from_args(config, args)
    
    # Create the TensorBoard/CSV logger.
    logger = TrainingLogger(config)
    
    # Create the environment and the PPO agent.
    env = HunterEnv(render_mode='rgb_array')
    
    # Optionally wrap the environment to record videos at a fixed episode interval.
    if config['record_video']:
        env = RecordVideo(env, 
                         video_folder=config['video_folder'], 
                         episode_trigger=lambda x: x % config['video_interval'] == 0)
    
    # Entropy-regularisation weight, default 0.01.
    entropy_weight = config.get('entropy_weight', 0.01)
    ppo_hunter = PPOAgent(env, agent_id=0, 
                          lr=3e-4,  # learning rate
                          gamma=0.99,  # discount factor: weight of long-term rewards
                          eps_clip=0.2,  # PPO clipping parameter, bounds the update size
                          k_epochs=10,  # optimisation epochs per update
                          buffer_size=config['buffer_size'],  # experience buffer size
                          entropy_weight=entropy_weight,  # configured entropy weight
                          actor_weight=0.5,  # actor loss weight
                          critic_weight=1.0)  # critic (value) loss weight
    
    # Behaviour-cloning pretraining from expert trajectories, when available.
    expert_data_path = 'expert_data/expert_trajectories.pkl'
    if os.path.exists(expert_data_path):
        print(f"\n发现专家数据: {expert_data_path}")
        print("开始行为克隆预训练...")
        
        # Load the expert data.
        # NOTE(review): pickle.load can execute arbitrary code — only load
        # expert data files from a trusted source.
        with open(expert_data_path, 'rb') as f:
            expert_data = pickle.load(f)
        
        # Run behaviour cloning on the expert data.
        bc_loss = ppo_hunter.behavior_clone_train(expert_data, epochs=10, batch_size=4096)
        
        # Save the pretrained model.
        bc_pretrain_path = 'models/hunter_bc_pretrain.pth'
        ppo_hunter.save(bc_pretrain_path)
        print(f"行为克隆预训练完成！")
        print(f"预训练模型保存到: {bc_pretrain_path}")
        print(f"预训练损失: {bc_loss['total_loss']:.4f}")
    else:
        print(f"\n未发现专家数据: {expert_data_path}")
        print("跳过行为克隆预训练，直接进行PPO训练")
    
    # Print the effective configuration.
    # NOTE(review): 'load_agent_weight' is printed below but never actually
    # loaded into the agent anywhere in this file — confirm whether a
    # ppo_hunter.load(...) call is missing.
    print("\n训练配置:")
    print(f"  最大训练回合数: {config['max_episodes']}")
    print(f"  每回合最大步数: {config['max_timesteps']}")
    print(f"  经验回放缓冲区大小: {config['buffer_size']}")
    if config['load_agent_weight'] is not None:
        print(f"  加载智能体权重文件: {config['load_agent_weight']}")
    print(f"  日志记录间隔: {config['log_interval']}")
    print(f"  测试间隔: {config['test_interval']}")
    print(f"  每次测试回合数: {config['num_test_episodes']}")
    print(f"  视频记录: {'启用' if config['record_video'] else '禁用'}")
    if config['record_video']:
        print(f"  视频记录间隔: {config['video_interval']}")
    print(f"  熵正则化权重: {entropy_weight}")
    print()
    
    # Deques keep only the most recent rewards (fixed length, O(1) append).
    recent_rewards_hunter = deque(maxlen=config['log_interval'])
    recent_rewards_escaper = deque(maxlen=config['log_interval'])
    
    # Training parameters.
    max_epochs = config['max_episodes']  # max_episodes doubles as the epoch count
    max_timesteps = config['max_timesteps']
    buffer_size = config['buffer_size']
    log_interval = config['log_interval']
    test_interval = config['test_interval']
    
    # Multiprocessing setup.
    num_processes = config.get('num_processes', mp.cpu_count())  # default: all available CPU cores
    print(f"使用 {num_processes} 个进程进行数据采集")
    
    # Episodes each worker must play so that complete games can fill the buffer.
    episodes_per_process = ceil(buffer_size / max_timesteps) // num_processes + 1  # at least 1 episode per worker
    print(f"每进程需采集 {episodes_per_process} 局数据")
    
    # Bookkeeping for evaluation and best-model checkpoints.
    test_results = None
    max_hunter_win = 0
    max_hunter_reward = -float('inf')  # best hunter average reward seen so far
    max_reward_model_path = None  # path of the best-reward checkpoint

    for epoch in range(1, max_epochs + 1):
        print(f"\n========= 第 {epoch} 个Epoch =========")
        
        # Snapshot the current model state on CPU so it pickles cleanly to workers.
        original_device = next(ppo_hunter.policy.parameters()).device
        ppo_hunter.policy = ppo_hunter.policy.cpu()
        
        model_state_dict = {
            'policy': deepcopy(ppo_hunter.policy.state_dict()),
            'optimizer': deepcopy(ppo_hunter.optimizer.state_dict()),
            'entropy_weight': ppo_hunter.entropy_weight
        }
        
        # Move the model back to its original device.
        ppo_hunter.policy = ppo_hunter.policy.to(original_device)
        
        # Environment options handed to each worker.
        env_config = {
            'render_mode': 'rgb_array'
        }
        
        # Fan data collection out across a process pool.
        pool = None
        try:
            pool = mp.Pool(processes=num_processes)
            # Submit one collection task per worker.
            tasks = []
            for process_id in range(num_processes):
                task = pool.apply_async(collect_data_process, (
                    process_id,
                    env_config,
                    model_state_dict,
                    buffer_size,
                    max_timesteps,
                    episodes_per_process
                ))
                tasks.append(task)
            
            # Gather every worker's result (blocks until each finishes).
            all_data = []
            total_collected_steps = 0
            for i, task in enumerate(tasks):
                process_data = task.get()
                all_data.append(process_data)
                total_collected_steps += process_data['total_steps']
                print(f"进程 {i} 采集完成: {process_data['total_steps']} 步")
        
        except KeyboardInterrupt:
            print("\n检测到Ctrl+C，正在终止所有进程...")
            if pool:
                pool.terminate()  # kill all workers immediately
                pool.join()       # wait for them to exit
            raise  # re-raise so the program exits for real
        
        finally:
            # NOTE(review): pool._state is a private multiprocessing attribute
            # (0 == RUN); this check may break across Python versions.
            if pool and pool._state == 0:  # only close a still-running pool
                pool.close()
                pool.join()
        
        print(f"\n总采集样本数: {total_collected_steps}，目标样本数: {buffer_size}")
        
        # Merge per-worker data while preserving each worker's time ordering.
        all_game_data = []
        total_samples = 0
        
        # Treat each worker's output as one independent game sequence.
        for process_data in all_data:
            game_data = {
                'states': process_data['states'],
                'actions': process_data['actions'],
                'log_probs': process_data['log_probs'],
                'rewards': process_data['rewards'],
                'dones': process_data['dones']
            }
            all_game_data.append(game_data)
            total_samples += len(process_data['states'])
        
        # Top-up pass: collect extra data single-process if under buffer_size.
        if total_samples < buffer_size:
            print(f"警告：采集样本数不足，需要额外采集 {buffer_size - total_samples} 个样本")
            # Collect the shortfall in this process.
            additional_data = collect_data_process(
                -1,  # marks the top-up pass
                env_config,
                model_state_dict,
                buffer_size,
                max_timesteps,
                ceil((buffer_size - total_samples) / max_timesteps) + 1  # one extra episode of slack
            )
            
            # Append the extra data as one more game sequence.
            additional_game_data = {
                'states': additional_data['states'],
                'actions': additional_data['actions'],
                'log_probs': additional_data['log_probs'],
                'rewards': additional_data['rewards'],
                'dones': additional_data['dones']
            }
            all_game_data.append(additional_game_data)
            total_samples += len(additional_data['states'])
            
            total_collected_steps += additional_data['total_steps']
            print(f"补充采集完成，总样本数: {total_collected_steps}")
        
        # Flatten complete game sequences until at least buffer_size samples.
        all_states = []
        all_actions = []
        all_log_probs = []
        all_rewards = []
        all_dones = []
        
        # Append whole sequences only — no mid-game truncation in this loop.
        for game_data in all_game_data:
            all_states.extend(game_data['states'])
            all_actions.extend(game_data['actions'])
            all_log_probs.extend(game_data['log_probs'])
            all_rewards.extend(game_data['rewards'])
            all_dones.extend(game_data['dones'])
            
            # Stop once the target sample count is reached.
            if len(all_states) >= buffer_size:
                break
        
        # If over budget, cut back at the first game boundary at/after buffer_size.
        if len(all_states) > buffer_size:
            # Find the first done flag at or past the target size.
            for i in range(buffer_size, len(all_states)):
                if all_dones[i-1]:
                    truncate_idx = i
                    break
            else:
                # No boundary found: fall back to a hard cut at buffer_size.
                truncate_idx = buffer_size
            
            print(f"数据量超过目标，截断到 {truncate_idx} 个样本（保持完整游戏序列）")
            all_states = all_states[:truncate_idx]
            all_actions = all_actions[:truncate_idx]
            all_log_probs = all_log_probs[:truncate_idx]
            all_rewards = all_rewards[:truncate_idx]
            all_dones = all_dones[:truncate_idx]
        
        # Load the merged rollout into the PPO agent's memory.
        ppo_hunter.memory.clear_memory()
        for state, action, log_prob, reward, done in zip(all_states, all_actions, all_log_probs, all_rewards, all_dones):
            ppo_hunter.memory.states.append(state)
            ppo_hunter.memory.actions.append(action)
            ppo_hunter.memory.logprobs.append(log_prob)
            ppo_hunter.memory.rewards.append(reward)
            ppo_hunter.memory.is_terminals.append(done)
        
        # Run one PPO update and capture the loss breakdown.
        print("\n开始策略更新...")
        loss_info = ppo_hunter.update()
        ppo_hunter.memory.clear_memory()  # drop on-policy data so the next epoch starts fresh
        
        # Log the loss components to TensorBoard.
        if config['enable_tensorboard']:
            logger.log_scalar('loss/total_loss', loss_info['total_loss'], epoch)
            logger.log_scalar('loss/actor_surr_loss1', loss_info['actor_surr_loss1'], epoch)
            logger.log_scalar('loss/actor_surr_loss2', loss_info['actor_surr_loss2'], epoch)
            logger.log_scalar('loss/critic_value_loss', loss_info['critic_value_loss'], epoch)
            logger.log_scalar('loss/total_entropy', loss_info['total_entropy'], epoch)
        
        print(f"策略更新完成，总损失: {loss_info['total_loss']:.4f}")
        
        # Periodic evaluation against the rule-based escaper.
        # (epoch starts at 1, so the epoch > 0 guard is always true.)
        if epoch > 0 and epoch % test_interval == 0:
            print(f"\n执行测试（第 {epoch} 个Epoch）...")
            test_results = test_agent(env, ppo_hunter, config['num_test_episodes'], max_timesteps)
            
            # Checkpoint whenever the hunter ties or beats its best win count.
            if test_results['hunter_wins'] >= max_hunter_win:
                max_hunter_win = test_results['hunter_wins']
                ppo_hunter.save(f'models/hunter_model_max_win_{max_hunter_win}.pth')
            
            # Checkpoint whenever the hunter sets a new best average reward.
            if test_results['avg_reward_hunter'] > max_hunter_reward:
                max_hunter_reward = test_results['avg_reward_hunter']
                # Save under models/ ...
                ppo_hunter.save(f'models/hunter_model_max_reward_{max_hunter_reward:.2f}.pth')
                # ... and mirror into the log directory for easy retrieval.
                max_reward_model_path = os.path.join(logger.log_dir, f'hunter_max_reward_{max_hunter_reward:.2f}.pth')
                ppo_hunter.save(max_reward_model_path)
            
            print(f'测试结果:')
            print(f'  猎手平均奖励: {test_results["avg_reward_hunter"]:.2f}')
            print(f'  逃脱者平均奖励: {test_results["avg_reward_escaper"]:.2f}')
            print(f'  猎手胜利次数: {test_results["hunter_wins"]}')
            print(f'  逃脱者胜利次数: {test_results["escaper_wins"]}')
            
            # Log the test results to TensorBoard.
            if config['enable_tensorboard']:
                logger.log_scalar('test/reward_hunter', test_results['avg_reward_hunter'], epoch)
                logger.log_scalar('test/reward_escaper', test_results['avg_reward_escaper'], epoch)
                logger.log_scalar('test/hunter_wins', test_results['hunter_wins'], epoch)
                logger.log_scalar('test/escaper_wins', test_results['escaper_wins'], epoch)
        
        # Periodically write statistics to the CSV file.
        if epoch % log_interval == 0:
            # The most recent test rewards stand in for this epoch's rewards.
            if test_results:
                recent_rewards_hunter.append(test_results['avg_reward_hunter'])
                recent_rewards_escaper.append(test_results['avg_reward_escaper'])
            
            # Average over the recent-reward window (0 before any test ran).
            avg_reward_hunter = sum(recent_rewards_hunter) / len(recent_rewards_hunter) if recent_rewards_hunter else 0
            avg_reward_escaper = sum(recent_rewards_escaper) / len(recent_rewards_escaper) if recent_rewards_escaper else 0
            
            # One add_scalars call groups both curves under a single tag.
            logger.log_scalars('reward/avg', {
                'hunter': avg_reward_hunter,
                'escaper': avg_reward_escaper
            }, epoch)
            
            # Console summary for this epoch.
            print(f"\n====== 第 {epoch} 个Epoch 统计信息 ======")
            print(f"猎手平均奖励: {avg_reward_hunter:.2f}")
            print(f"逃脱者平均奖励: {avg_reward_escaper:.2f}")
            print(f"总损失: {loss_info['total_loss']:.4f}")
            print(f"总熵: {loss_info['total_entropy']:.4f}")
            print(f"采集样本数: {len(all_states)}")
            print(f"==================================")
            
            stats = {
                'episode': epoch,  # field name kept for CSV compatibility
                'avg_reward_hunter': avg_reward_hunter,
                'avg_reward_escaper': avg_reward_escaper,
                'total_loss': loss_info['total_loss'],
                'actor_surr_loss1': loss_info['actor_surr_loss1'],  # surrogate loss, first action head
                'actor_surr_loss2': loss_info['actor_surr_loss2'],  # surrogate loss, second action head
                'critic_value_loss': loss_info['critic_value_loss'],  # critic value loss
                'total_entropy': loss_info['total_entropy'],  # policy entropy (exploration measure)
                'test_avg_reward_hunter': test_results['avg_reward_hunter'] if test_results else 0,
                'test_avg_reward_escaper': test_results['avg_reward_escaper'] if test_results else 0,
                'test_hunter_wins': test_results['hunter_wins'] if test_results else 0,
                'test_escaper_wins': test_results['escaper_wins'] if test_results else 0
            }
            
            if config['enable_csv']:
                logger.log_training_stats(epoch, stats)

    # Shut down the logger and the environment.
    logger.close()
    env.close()
    
    # Save the final model alongside the run's logs.
    final_model_path = os.path.join(logger.log_dir, 'hunter.pth')
    ppo_hunter.save(final_model_path)
    print(f"\n训练完成！")
    print(f"最终模型保存到: {final_model_path}")
    if max_reward_model_path:
        print(f"最高奖励模型保存到: {max_reward_model_path}")
        print(f"最高奖励值: {max_hunter_reward:.2f}")
    if config['enable_tensorboard']:
        print(f"TensorBoard日志路径: {logger.log_dir}")
        print(f"查看TensorBoard: tensorboard --logdir={logger.log_dir}")
    if config['enable_csv']:
        print(f"CSV统计文件: {logger.csv_file}")


if __name__ == '__main__':
    main()