"""详细评估脚本：记录每一步的奖励和路径，并可视化"""
import argparse
import os
import torch
import numpy as np
import time
try:
    import matplotlib
    matplotlib.use('Agg')  # 非交互式后端
    import matplotlib.pyplot as plt
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False
    print("Warning: matplotlib not found. Visualization will be disabled.")

from src.envs.overcooked.overcooked_env import Overcooked
from src.algo.r_mappo.rMAPPOPolicy import R_MAPPOPolicy


def parse_eval_args():
    """Build and parse the command-line arguments for detailed evaluation.

    Returns:
        argparse.Namespace holding model, evaluation and device settings.
    """
    parser = argparse.ArgumentParser(description='详细评估 MACE Overcooked 模型')
    add = parser.add_argument

    # Required / environment arguments.
    add('--model_dir', type=str, required=True,
        help='模型路径')
    add('--layout_name', type=str, default='small_coor_2',
        help='地图名称')
    add('--max_timesteps', type=int, default=300,
        help='最大步数')

    # Evaluation settings.
    add('--n_eval_episodes', type=int, default=10,
        help='评估轮数')
    add('--render', action='store_true',
        help='渲染游戏画面')
    add('--render_delay', type=float, default=0.1,
        help='渲染延迟（秒）')
    add('--deterministic', action='store_true',
        help='使用确定性策略')
    add('--save_trajectory', action='store_true', default=True,
        help='保存轨迹数据')
    add('--output_dir', type=str, default='eval_results',
        help='输出目录')

    # Device and reproducibility.
    add('--cuda', action='store_true', default=False)
    add('--seed', type=int, default=0)

    return parser.parse_args()


def load_model(model_dir, env, device):
    """Load the trained per-agent actor models from ``model_dir``.

    Rebuilds the policy hyperparameters from the training run's saved
    ``params.json`` and loads one actor checkpoint per agent.

    Args:
        model_dir: directory containing ``actor_agent{i}.pt`` checkpoints.
        env: environment providing ``observation_space``, ``action_space``
            and ``num_players``.
        device: torch.device the policies should run on.

    Returns:
        List of ``R_MAPPOPolicy`` objects (one per agent), actors in eval mode.

    Raises:
        ValueError: if ``model_dir``, the training config, or an actor
            checkpoint file is missing.
    """
    print(f"\n从 {model_dir} 加载模型...")

    if not os.path.exists(model_dir):
        raise ValueError(f"模型目录不存在: {model_dir}")

    # All agents share the same observation/action space (index 0 is used).
    obs_space = env.observation_space[0]
    act_space = env.action_space[0]
    num_agents = env.num_players

    print(f"环境信息: {num_agents} 个智能体")
    print(f"观察空间: {obs_space}")
    print(f"动作空间: {act_space}")

    # Load hyperparameters from the training config. NOTE(review): assumes
    # params.json lives two directory levels above model_dir — confirm against
    # the training script's output layout.
    config_path = os.path.join(os.path.dirname(os.path.dirname(model_dir)), 'params.json')
    if os.path.exists(config_path):
        import json
        with open(config_path, 'r') as f:
            saved_config = json.load(f)
        print(f"✓ 加载配置: {config_path}")

        # Rebuild the argument object expected by R_MAPPOPolicy from the
        # saved training configuration (falling back to defaults for any
        # missing key).
        class Args:
            # Optimizer settings read from the saved config
            actor_lr = saved_config.get('actor_lr', 7e-4)
            critic_lr = saved_config.get('critic_lr', 7e-4)
            rnd_lr = saved_config.get('rnd_lr', 5e-4)
            opti_eps = saved_config.get('opti_eps', 1e-5)
            weight_decay = saved_config.get('weight_decay', 0)

            # Policy settings
            use_recurrent_policy = saved_config.get('use_recurrent_policy', True)
            use_naive_recurrent_policy = saved_config.get('use_naive_recurrent_policy', False)
            recurrent_N = saved_config.get('recurrent_N', 1)
            hidden_size = saved_config.get('hidden_size', 64)
            layer_N = saved_config.get('layer_N', 1)
            stacked_frames = saved_config.get('stacked_frames', 1)
            use_feature_normalization = saved_config.get('use_feature_normalization', False)
            use_orthogonal = saved_config.get('use_orthogonal', True)
            use_layernorm = saved_config.get('use_layernorm', False)
            gain = saved_config.get('gain', 0.01)
            use_policy_active_masks = saved_config.get('use_policy_active_masks', True)
            use_ReLU = saved_config.get('use_ReLU', False)

            # Value settings
            use_popart = saved_config.get('use_popart', False)
            use_valuenorm = saved_config.get('use_valuenorm', False)
            use_max_grad_norm = saved_config.get('use_max_grad_norm', True)
            max_grad_norm = saved_config.get('max_grad_norm', 10.0)
            use_clipped_value_loss = saved_config.get('use_clipped_value_loss', True)
            use_huber_loss = saved_config.get('use_huber_loss', True)
            huber_delta = saved_config.get('huber_delta', 10.0)
            use_proper_time_limits = saved_config.get('use_proper_time_limits', True)

            # Novelty and HDD settings
            novel_type = saved_config.get('novel_type', 4)
            rnd_rep_size = saved_config.get('rnd_rep_size', 128)
            rep_size = saved_config.get('rep_size', 128)
            use_hdd = saved_config.get('use_hdd', True)
            hdd_count = saved_config.get('hdd_count', False)
            hdd_input_type = saved_config.get('hdd_input_type', 1)
            hdd_lr = saved_config.get('hdd_lr', 5e-4)
            hdd_hidden_size = saved_config.get('hdd_hidden_size', 128)
            hdd_ret_ub = saved_config.get('hdd_ret_ub', 0.5)
            hdd_reduce = saved_config.get('hdd_reduce', 'mean')
            hdd_epoch = saved_config.get('hdd_epoch', 4)
            hdd_batch_size = saved_config.get('hdd_batch_size', 100000)
            use_hdd_active_masks = saved_config.get('use_hdd_active_masks', True)
            hdd_last_rew = saved_config.get('hdd_last_rew', False)
            hdd_reverse_ret = saved_config.get('hdd_reverse_ret', False)
            n_agents = saved_config.get('n_agents', 2)

        print(f"  hidden_size: {Args.hidden_size}")
        print(f"  use_feature_normalization: {Args.use_feature_normalization}")
        print(f"  use_popart: {Args.use_popart}")
        print(f"  use_valuenorm: {Args.use_valuenorm}")
    else:
        raise ValueError(f"找不到配置文件: {config_path}")

    args = Args()

    # Build one policy per agent and load its actor checkpoint.
    policies = []
    for agent_id in range(num_agents):
        # The observation space is passed twice (per-agent obs and centralized
        # critic input use the same space here).
        policy = R_MAPPOPolicy(args, obs_space, obs_space, act_space, device=device)

        # Load actor weights for this agent.
        actor_path = os.path.join(model_dir, f'actor_agent{agent_id}.pt')
        if not os.path.exists(actor_path):
            raise ValueError(f"找不到模型文件: {actor_path}")

        actor_state_dict = torch.load(actor_path, map_location=device)
        policy.actor.load_state_dict(actor_state_dict)
        policy.actor.eval()  # inference only: disable dropout/batchnorm updates
        print(f"✓ 加载 agent{agent_id} 的 actor 模型")

        policies.append(policy)

    return policies


def evaluate_with_trajectory(env, policies, args):
    """Run evaluation episodes while recording per-step trajectories.

    Args:
        env: environment exposing ``num_players``, ``reset()``, ``step()`` and
            (optionally) ``render()``.
        policies: per-agent policies exposing ``act()`` and ``.device``.
        args: parsed namespace; uses ``n_eval_episodes``, ``max_timesteps``,
            ``deterministic``, ``render`` and ``render_delay``.

    Returns:
        Tuple ``(all_trajectories, episode_rewards, episode_lengths)`` where
        each trajectory is a dict with 'observations', 'actions', 'rewards',
        'positions' and 'episode' keys.
    """
    num_agents = env.num_players
    all_trajectories = []
    episode_rewards = []
    episode_lengths = []

    # Keep RNN states / masks / observations on the same device as each
    # policy so policy.act() never mixes CPU and GPU tensors.
    devices = [policies[i].device for i in range(num_agents)]

    print("\n开始详细评估...")

    for episode in range(args.n_eval_episodes):
        obs = env.reset()
        # RNN states and masks are torch.Tensors for the whole rollout.
        # NOTE(review): the hidden size 64 is hard-coded; it must match the
        # trained model's hidden_size (see load_model) — TODO read it from
        # the saved config instead.
        rnn_states = [torch.zeros((1, 1, 64), device=devices[i]) for i in range(num_agents)]
        masks = [torch.ones((1, 1), device=devices[i]) for i in range(num_agents)]

        trajectory = {
            'observations': [],
            'actions': [],
            'rewards': [],
            'positions': [],
            'episode': episode
        }

        episode_reward = 0
        step = 0
        done = False

        print(f"\n{'='*50}")
        print(f"Episode {episode + 1}/{args.n_eval_episodes}")
        print(f"{'='*50}")

        while not done and step < args.max_timesteps:
            # Record the raw observations seen before acting.
            trajectory['observations'].append([o.copy() for o in obs])

            # Query each agent's policy for an action.
            actions = []

            for agent_id in range(num_agents):
                obs_tensor = torch.FloatTensor(obs[agent_id]).unsqueeze(0).to(devices[agent_id])

                with torch.no_grad():
                    # policy.act() returns the chosen action and the updated
                    # recurrent state.
                    action, rnn_state = policies[agent_id].act(
                        obs_tensor,
                        rnn_states[agent_id],
                        masks[agent_id],
                        available_actions=None,
                        deterministic=args.deterministic
                    )

                actions.append(int(action.item()))
                rnn_states[agent_id] = rnn_state  # stays a tensor

            trajectory['actions'].append([int(a) for a in actions])

            # Step the environment with the joint action.
            obs, rewards, dones, infos = env.step(actions)

            # Track agent 0's reward; presumably a shared team reward —
            # TODO confirm against the environment's reward structure.
            reward = rewards[0][0]
            trajectory['rewards'].append(reward)

            if args.render:
                env.render()
                time.sleep(args.render_delay)

            episode_reward += reward
            step += 1
            # NOTE(review): episode termination follows agent 0's done flag.
            done = dones[0]

            # Periodic progress line, plus an extra line on any reward.
            if step % 50 == 0 or reward > 0:
                print(f"  步骤 {step}: 动作={actions}, 奖励={reward:.2f}, 累计奖励={episode_reward:.2f}")

            if reward > 0:
                print(f"  ⭐ 步骤 {step}: 获得奖励 {reward:.2f}!")

            # BUGFIX: keep the masks as torch tensors on the policy device.
            # The original replaced them with numpy arrays here, contradicting
            # the tensor masks fed to policy.act() on the first step.
            masks = [
                torch.zeros((1, 1), device=devices[i]) if dones[i]
                else torch.ones((1, 1), device=devices[i])
                for i in range(num_agents)
            ]

        episode_rewards.append(episode_reward)
        episode_lengths.append(step)
        all_trajectories.append(trajectory)

        print(f"\n✓ Episode {episode + 1} 完成:")
        print(f"  总奖励: {episode_reward:.2f}")
        print(f"  总步数: {step}")
        print(f"  平均每步奖励: {episode_reward/step if step > 0 else 0:.4f}")

    # Aggregate statistics over all evaluation episodes.
    mean_reward = np.mean(episode_rewards)
    std_reward = np.std(episode_rewards)
    mean_length = np.mean(episode_lengths)

    print("\n" + "="*60)
    print(f"评估统计 ({args.n_eval_episodes} episodes):")
    print(f"  平均奖励: {mean_reward:.2f} ± {std_reward:.2f}")
    print(f"  平均长度: {mean_length:.2f}")
    print(f"  最高奖励: {max(episode_rewards):.2f}")
    print(f"  最低奖励: {min(episode_rewards):.2f}")
    print(f"  成功率: {sum(1 for r in episode_rewards if r > 0) / len(episode_rewards) * 100:.1f}%")
    print("="*60)

    return all_trajectories, episode_rewards, episode_lengths


def visualize_results(trajectories, episode_rewards, episode_lengths, output_dir):
    """Save plots, raw trajectory data and a text report for an evaluation run.

    Writes into ``output_dir``: a 4-panel summary figure (only when matplotlib
    is available), the trajectories as an .npz archive, and a plain-text
    report.

    Args:
        trajectories: per-episode dicts with 'observations', 'actions',
            'rewards', 'positions' and 'episode' keys.
        episode_rewards: per-episode total rewards.
        episode_lengths: per-episode step counts.
        output_dir: directory the artifacts are written into (created if
            missing).
    """
    os.makedirs(output_dir, exist_ok=True)

    # BUGFIX: guard against an empty evaluation — argmax/max/min below would
    # raise on empty sequences.
    if not episode_rewards:
        print("⚠ 没有可用的评估数据，跳过结果保存")
        return

    # Plotting section (only if matplotlib imported successfully).
    if HAS_MATPLOTLIB:
        # 1. Reward curve per episode.
        plt.figure(figsize=(12, 8))

        plt.subplot(2, 2, 1)
        plt.plot(episode_rewards, 'o-', linewidth=2, markersize=8)
        plt.xlabel('Episode', fontsize=12)
        plt.ylabel('Total Reward', fontsize=12)
        plt.title('Episode Rewards', fontsize=14)
        plt.grid(True, alpha=0.3)

        plt.subplot(2, 2, 2)
        plt.plot(episode_lengths, 's-', linewidth=2, markersize=8, color='orange')
        plt.xlabel('Episode', fontsize=12)
        plt.ylabel('Episode Length', fontsize=12)
        plt.title('Episode Lengths', fontsize=14)
        plt.grid(True, alpha=0.3)

        # 2. Reward distribution histogram.
        plt.subplot(2, 2, 3)
        plt.hist(episode_rewards, bins=20, edgecolor='black', alpha=0.7)
        plt.xlabel('Reward', fontsize=12)
        plt.ylabel('Frequency', fontsize=12)
        plt.title('Reward Distribution', fontsize=14)
        plt.grid(True, alpha=0.3, axis='y')

        # 3. Per-step reward heatmap for the best episode.
        plt.subplot(2, 2, 4)
        best_ep_idx = np.argmax(episode_rewards)
        best_trajectory = trajectories[best_ep_idx]
        rewards = best_trajectory['rewards']

        # One-row heatmap: each column is one step's reward.
        step_rewards = np.array(rewards).reshape(1, -1)
        plt.imshow(step_rewards, aspect='auto', cmap='RdYlGn', interpolation='nearest')
        plt.colorbar(label='Reward')
        plt.xlabel('Step', fontsize=12)
        plt.title(f'Best Episode Step Rewards (Total: {sum(rewards):.2f})', fontsize=14)

        plt.tight_layout()
        plot_path = os.path.join(output_dir, 'evaluation_summary.png')
        plt.savefig(plot_path, dpi=150, bbox_inches='tight')
        print(f"\n✓ 保存可视化结果: {plot_path}")
        plt.close()
    else:
        print("\n⚠ matplotlib 未安装，跳过图表生成")

    # 4. Raw trajectory data. The trajectory dicts are stored as a pickled
    # object array, so reload with np.load(..., allow_pickle=True).
    trajectory_path = os.path.join(output_dir, 'trajectories.npz')
    np.savez(trajectory_path,
             episode_rewards=episode_rewards,
             episode_lengths=episode_lengths,
             trajectories=trajectories)
    print(f"✓ 保存轨迹数据: {trajectory_path}")

    # 5. Plain-text report. BUGFIX: write as UTF-8 explicitly so the Chinese
    # text and '±' do not depend on the platform's default locale encoding.
    report_path = os.path.join(output_dir, 'evaluation_report.txt')
    with open(report_path, 'w', encoding='utf-8') as f:
        f.write("="*60 + "\n")
        f.write("Overcooked 模型评估报告\n")
        f.write("="*60 + "\n\n")
        f.write(f"总 Episodes: {len(episode_rewards)}\n")
        f.write(f"平均奖励: {np.mean(episode_rewards):.2f} ± {np.std(episode_rewards):.2f}\n")
        f.write(f"平均长度: {np.mean(episode_lengths):.2f}\n")
        f.write(f"最高奖励: {max(episode_rewards):.2f}\n")
        f.write(f"最低奖励: {min(episode_rewards):.2f}\n")
        f.write(f"成功率: {sum(1 for r in episode_rewards if r > 0) / len(episode_rewards) * 100:.1f}%\n\n")

        f.write("每个 Episode 的详细信息:\n")
        f.write("-"*60 + "\n")
        for i, (reward, length) in enumerate(zip(episode_rewards, episode_lengths)):
            f.write(f"Episode {i+1}: 奖励={reward:.2f}, 长度={length}\n")

    print(f"✓ 保存评估报告: {report_path}")


def main():
    """Entry point: parse args, build the env, load policies, run evaluation,
    and optionally persist plots and trajectory data."""
    args = parse_eval_args()

    # Pick the compute device (GPU only when requested and actually available).
    use_gpu = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if use_gpu else "cpu")
    print("使用 GPU" if use_gpu else "使用 CPU")

    # Seed RNGs for reproducibility.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Build the Overcooked environment.
    print(f"\n创建 Overcooked 环境: {args.layout_name}")
    env = Overcooked(
        layout_name=args.layout_name,
        max_timesteps=args.max_timesteps,
        obs_type="vector",
        multi_round=False,
    )

    # Load the trained per-agent policies.
    policies = load_model(args.model_dir, env, device)

    try:
        # Run the evaluation episodes.
        trajectories, episode_rewards, episode_lengths = evaluate_with_trajectory(
            env, policies, args
        )

        # Persist plots, trajectories and the text report.
        if args.save_trajectory:
            visualize_results(trajectories, episode_rewards, episode_lengths, args.output_dir)

    except KeyboardInterrupt:
        print("\n\n评估被用户中断")

    print("\n✅ 评估完成！")

# Run the full evaluation pipeline only when executed as a script.
if __name__ == "__main__":
    main()
