import gymnasium as gym
import torch
import os
import argparse
import json
import signal
from Hunt_Env.Hunt_env.envs.Hunt_world import HunterEnv
from Hunt_Env.Hunt_env.envs.decision.rule_based_decision import rule_based_decision
from ppo import PPOAgent, Memory, ActorCritic
from train_rl_hunter import process_observation

# Global flag flipped by the signal handler once a termination signal arrives;
# the main evaluation loop polls it to shut down gracefully.
exiting = False

# Module-level signal handler: flips the global `exiting` flag exactly once
# so the evaluation loop can wind down cleanly.
def signal_handler(signum, frame):
    global exiting
    if exiting:
        return
    exiting = True
    print("\n检测到终止信号，正在清理资源...")

# Register the handler for both the interactive interrupt and external
# termination requests so cleanup can run before the process exits.
signal.signal(signal.SIGINT, signal_handler)  # handle Ctrl+C
signal.signal(signal.SIGTERM, signal_handler)  # handle termination signal

def load_config(config_file):
    """
    Load a JSON configuration file.

    Args:
        config_file: path to the configuration file.

    Returns:
        dict: the parsed configuration.

    Exits:
        Terminates the process with status 1 if the file is missing
        or contains invalid JSON.
    """
    try:
        with open(config_file, 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"错误: 配置文件 {config_file} 不存在")
        # BUG FIX: the original called sys.exit(1) but `sys` is never imported
        # in this file, so this branch raised NameError instead of exiting.
        # raise SystemExit(1) is equivalent and needs no import.
        raise SystemExit(1)
    except json.JSONDecodeError:
        print(f"错误: 配置文件 {config_file} 格式错误")
        raise SystemExit(1)

def parse_arguments():
    """
    Parse command-line options for the evaluation run.

    Returns:
        argparse.Namespace: the parsed arguments (weight, config,
        max_timesteps, render_mode).
    """
    parser = argparse.ArgumentParser(description='评估训练后猎人智能体的性能')
    # Table-driven registration: one (flag, kwargs) entry per option.
    option_table = (
        ('--weight', dict(type=str, help='权重文件路径')),
        ('--config', dict(type=str, default='test_config.json', help='配置文件路径')),
        ('--max-timesteps', dict(type=int, default=100, help='每回合最大步数')),
        ('--render-mode', dict(type=str, default='human',
                               choices=['human', 'rgb_array'], help='渲染模式')),
    )
    for flag, kwargs in option_table:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()

def main():
    """Evaluate a trained PPO hunter agent against a rule-based escaper.

    Resolves the weight path from --weight (or the config file's
    ``weight_path``), loads the checkpoint into the PPO agent, then runs
    evaluation episodes until a termination signal flips the global
    ``exiting`` flag, printing per-episode rewards and running win counts.
    """
    # Parse command-line arguments
    args = parse_arguments()
    
    # Load the config file if it exists; otherwise fall back to an empty dict
    # (load_config itself exits on a missing file, so guard with exists()).
    if os.path.exists(args.config):
        config = load_config(args.config)
    else:
        config = {}
    
    # Weight path: the command-line flag takes precedence over the config file
    weight_path = args.weight or config.get('weight_path')
    if not weight_path:
        print("错误: 未指定权重文件路径，请使用 --weight 参数或在配置文件中设置 weight_path")
        return
    
    # Bail out early if the weight file does not exist
    if not os.path.exists(weight_path):
        print(f"错误: 权重文件 {weight_path} 不存在")
        return
    
    # Max steps per episode. NOTE(review): the CLI default is 100 (truthy),
    # so the config fallback of 500 is only reached via --max-timesteps 0.
    max_timesteps = args.max_timesteps or config.get('max_timesteps', 500)
    
    # Create the environment
    env = HunterEnv(render_mode=args.render_mode)
    
    try:
        # Build the hunter agent (agent_0)
        ppo_hunter = PPOAgent(env, agent_id=0)
        
        # Load the trained weights into both current and old policies
        try:
            # Prefer GPU when available, otherwise CPU
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            
            # map_location lets GPU-saved checkpoints load on CPU-only machines
            checkpoint = torch.load(weight_path, map_location=device)
            if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
                # Checkpoint wraps the weights in a {'state_dict': ...} dict
                ppo_hunter.policy.load_state_dict(checkpoint['state_dict'])
                ppo_hunter.policy_old.load_state_dict(checkpoint['state_dict'])
            else:
                # Checkpoint is a bare state_dict
                ppo_hunter.policy.load_state_dict(checkpoint)
                ppo_hunter.policy_old.load_state_dict(checkpoint)
            
            print(f"成功加载权重文件: {weight_path}")
            print(f"使用设备: {device}")
        except Exception as e:
            print(f"错误: 加载权重文件失败: {str(e)}")
            return
        
        episode = 0
        hunter_wins = 0   # episodes ended by `done` (hunter victory)
        escaper_wins = 0  # episodes ended by `truncated` (escaper victory)
        
        print("\n开始评估猎人智能体...")
        print("按 Ctrl+C 退出")
        
        # Run episodes until a SIGINT/SIGTERM flips the global `exiting` flag
        while not exiting:
            episode += 1
            # NOTE(review): reset() is used as a plain observation dict here,
            # not the Gymnasium (obs, info) tuple — confirm HunterEnv matches.
            state = env.reset()
            state_hunter = process_observation(state['agent_0'])
            episode_reward_hunter = 0
            episode_reward_escaper = 0
            
            print(f"\n--- 第 {episode} 回合 ---")
            
            for t in range(max_timesteps):
                if exiting:
                    break
                
                # Hunter action from the trained policy (no gradients needed)
                with torch.no_grad():
                    (action_hunter1, action_hunter2), _ = ppo_hunter.select_action(state_hunter)
                
                # Escaper action from the hand-written rule-based policy
                (action_escaper1, action_escaper2) = rule_based_decision(state['agent_1'], '逃脱者')
                
                # Assemble the joint action dict expected by env.step
                action = {
                    'agent_0': [action_hunter1.cpu().item(), action_hunter2.cpu().item()],
                    'agent_1': [action_escaper1, action_escaper2]
                }
                
                # Step the environment
                state, _, done, truncated, info = env.step(action)
                
                # Render only in interactive mode
                if args.render_mode == 'human':
                    env.render()
                
                # Preprocess the hunter's next observation
                state_hunter = process_observation(state['agent_0'])
                
                # Accumulate per-agent rewards reported via `info`
                episode_reward_hunter += info['reward']['agent_0']
                episode_reward_escaper += info['reward']['agent_1']
                
                # done -> hunter caught the escaper; truncated -> time limit
                # hit, which this script counts as an escaper win
                if done:
                    hunter_wins += 1
                    print("结果: 猎人胜利!")
                    break
                if truncated:
                    escaper_wins += 1
                    print("结果: 逃脱者胜利!")
                    break
            
            # Per-episode summary (t is the last loop index, so t+1 steps ran)
            print(f"回合结束，步数: {t+1}")
            print(f"猎人奖励: {episode_reward_hunter:.2f}")
            print(f"逃脱者奖励: {episode_reward_escaper:.2f}")
            
            
            # Running win statistics
            print(f"当前统计: 猎人 {hunter_wins} 胜, 逃脱者 {escaper_wins} 胜")
        
        print("\n评估完成!")
        print(f"最终统计: 猎人 {hunter_wins} 胜, 逃脱者 {escaper_wins} 胜")
        
    except Exception as e:
        # Broad catch at the program's top-level boundary: report the error
        # with a traceback, then fall through to the cleanup in `finally`.
        print(f"评估过程中发生错误: {str(e)}")
        import traceback
        traceback.print_exc()
    finally:
        # Always release the environment, even on error or Ctrl+C
        env.close()
        print("资源已释放，程序退出")

# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()