"""评估脚本：加载训练好的 Overcooked 模型并可视化"""
import argparse
import os
import torch
import numpy as np
import time

from src.envs.overcooked.overcooked_env import Overcooked
from src.algo.r_mappo.rMAPPOPolicy import R_MAPPOPolicy


def parse_eval_args():
    """Build and parse the command-line arguments for evaluating a trained model.

    Returns:
        argparse.Namespace with model path, environment, evaluation, and
        device/seed settings.
    """
    parser = argparse.ArgumentParser(description='Evaluate trained MACE models on Overcooked')
    add = parser.add_argument  # shorthand: every option goes through this

    # Required: where the checkpoint lives
    add('--model_dir', type=str, required=True,
        help='训练好的模型路径, 例如: logs/results/Overcooked-v0/small_coor_2/rmappo/MACE-xxx/models/cp_800')

    # Environment configuration
    add('--layout_name', type=str, default='small_coor_2',
        help='Overcooked 地图名称')
    add('--max_timesteps', type=int, default=300,
        help='每个 episode 的最大步数')
    add('--obs_type', type=str, default='vector', choices=['vector', 'image'],
        help='观察类型')

    # Evaluation configuration
    add('--n_eval_episodes', type=int, default=10,
        help='评估的 episode 数量')
    add('--render', action='store_true',
        help='是否渲染游戏画面（需要 pygame）')
    add('--render_delay', type=float, default=0.1,
        help='渲染时每帧的延迟（秒）')
    add('--deterministic', action='store_true',
        help='使用确定性策略（不采样）')

    # Device and reproducibility
    add('--cuda', action='store_true', default=False,
        help='使用 GPU')
    add('--seed', type=int, default=0,
        help='随机种子')

    return parser.parse_args()


class _EvalPolicyArgs:
    """Static hyperparameter namespace mirroring the training configuration.

    R_MAPPOPolicy's constructor expects an ``args`` object with these
    attributes. The values must match the checkpoint being loaded
    (e.g. hidden_size was inferred from the saved weights — see comments).
    Hoisted to module level so it is defined once instead of being
    re-declared on every iteration of the per-agent loop.
    """
    # Learning rates (unused at eval time, but required by the policy ctor)
    actor_lr = 5e-4
    critic_lr = 5e-4
    rnd_lr = 5e-4
    opti_eps = 1e-5
    weight_decay = 0

    # Policy settings
    use_recurrent_policy = True
    use_naive_recurrent_policy = False
    recurrent_N = 1
    hidden_size = 64  # inferred from the model weights
    layer_N = 1
    stacked_frames = 1
    use_feature_normalization = False  # checkpoint has no feature_norm layer
    use_orthogonal = True
    use_layernorm = False
    gain = 0.01
    use_policy_active_masks = True
    use_ReLU = False

    # Value settings
    use_popart = True
    use_valuenorm = True
    use_max_grad_norm = True
    max_grad_norm = 10.0
    use_clipped_value_loss = True
    use_huber_loss = True
    huber_delta = 10.0
    use_proper_time_limits = False

    # Novelty and HDD settings
    novel_type = 4  # RND
    rnd_rep_size = 128
    rep_size = 128
    use_hdd = True
    hdd_count = False
    hdd_input_type = 1
    hdd_lr = 5e-4
    hdd_hidden_size = 128
    hdd_ret_ub = 0.5
    hdd_reduce = "mean"
    hdd_epoch = 4
    hdd_batch_size = 100000
    use_hdd_active_masks = True
    hdd_last_rew = False
    hdd_reverse_ret = False
    n_agents = 2


def load_model(model_dir, env, device):
    """Load trained actor weights and build one policy per agent.

    Args:
        model_dir: directory containing ``actor_agent{i}.pt`` checkpoints.
        env: Overcooked environment instance (provides obs/action spaces
            and the number of players).
        device: torch device to map the weights onto.

    Returns:
        List of R_MAPPOPolicy objects (one per agent) with actors in eval mode.

    Raises:
        ValueError: if ``model_dir`` or any expected checkpoint file is missing.
    """
    print(f"从 {model_dir} 加载模型...")

    # Fail fast if the checkpoint directory does not exist
    if not os.path.exists(model_dir):
        raise ValueError(f"模型目录不存在: {model_dir}")

    # Environment info — all agents share the same obs/action spaces here
    obs_space = env.observation_space[0]
    act_space = env.action_space[0]
    num_agents = env.num_players

    print(f"环境信息: {num_agents} 个智能体")
    print(f"观察空间: {obs_space}")
    print(f"动作空间: {act_space}")

    policies = []
    for agent_id in range(num_agents):
        args = _EvalPolicyArgs()

        # Build the policy shell that the saved weights will be loaded into
        policy = R_MAPPOPolicy(
            args,
            obs_space,
            obs_space,  # share_obs_space: identical to obs_space here
            act_space,
            device=device
        )

        # Load this agent's actor weights
        actor_path = os.path.join(model_dir, f'actor_agent{agent_id}.pt')
        if not os.path.exists(actor_path):
            raise ValueError(f"Actor 模型文件不存在: {actor_path}")

        actor_state_dict = torch.load(actor_path, map_location=device)
        policy.actor.load_state_dict(actor_state_dict)
        policy.actor.eval()  # inference mode: disable dropout/batch-norm updates

        print(f"✓ 加载 agent{agent_id} 的 actor 模型")

        policies.append(policy)

    return policies


def evaluate(env, policies, args):
    """Roll out the loaded policies for several episodes and print reward stats.

    Args:
        env: Overcooked environment instance.
        policies: per-agent policy list returned by ``load_model``.
        args: parsed evaluation arguments (episode count, rendering, etc.).

    Returns:
        List of per-episode total rewards.
    """
    num_agents = env.num_players
    episode_rewards = []
    episode_lengths = []

    for episode in range(args.n_eval_episodes):
        obs = env.reset()
        # Fresh per-agent RNN state each episode: (batch=1, recurrent_N=1, hidden=64)
        rnn_states = [np.zeros((1, 1, 64)) for _ in range(num_agents)]
        masks = [np.ones((1, 1)) for _ in range(num_agents)]

        episode_reward = 0
        step = 0
        done = False

        print(f"\n=== Episode {episode + 1}/{args.n_eval_episodes} ===")

        while not done and step < args.max_timesteps:
            actions = []
            next_rnn_states = []

            # Query each agent's policy for its next action
            for agent_id, policy in enumerate(policies):
                dev = policy.device
                obs_t = torch.FloatTensor(obs[agent_id]).unsqueeze(0).to(dev)
                rnn_t = torch.FloatTensor(rnn_states[agent_id]).to(dev)
                mask_t = torch.FloatTensor(masks[agent_id]).to(dev)

                with torch.no_grad():
                    action, new_state = policy.act(
                        obs_t,
                        rnn_t,
                        mask_t,
                        deterministic=args.deterministic
                    )

                actions.append(action.cpu().numpy()[0])
                next_rnn_states.append(new_state.cpu().numpy())

            rnn_states = next_rnn_states

            # Advance the environment with the joint action
            obs, rewards, dones, _ = env.step(actions)

            # Optional on-screen rendering
            if args.render:
                env.render()
                time.sleep(args.render_delay)

            # rewards is nested ([[r], [r], ...]); all agents share one reward
            episode_reward += rewards[0][0]
            step += 1
            done = dones[0]  # all agents terminate together

            # Zero mask resets an agent's RNN state after it is done
            masks = [np.array([[0.0] if dones[i] else [1.0]])
                     for i in range(num_agents)]

        episode_rewards.append(episode_reward)
        episode_lengths.append(step)

        print(f"奖励: {episode_reward:.2f}, 长度: {step}")

    # Summary statistics over all evaluated episodes
    mean_reward = np.mean(episode_rewards)
    std_reward = np.std(episode_rewards)
    mean_length = np.mean(episode_lengths)

    print("\n" + "="*50)
    print(f"评估结果 ({args.n_eval_episodes} episodes):")
    print(f"平均奖励: {mean_reward:.2f} ± {std_reward:.2f}")
    print(f"平均长度: {mean_length:.2f}")
    print(f"最高奖励: {max(episode_rewards):.2f}")
    print(f"最低奖励: {min(episode_rewards):.2f}")
    print("="*50)

    return episode_rewards


def main():
    """Entry point: parse args, build the env, load policies, run evaluation."""
    args = parse_eval_args()

    # Device selection: GPU only when requested AND available
    use_gpu = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if use_gpu else "cpu")
    print("使用 GPU" if use_gpu else "使用 CPU")

    # Seed both torch and numpy for reproducible rollouts
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Build the evaluation environment
    print(f"\n创建 Overcooked 环境: {args.layout_name}")
    env = Overcooked(
        layout_name=args.layout_name,
        max_timesteps=args.max_timesteps,
        obs_type=args.obs_type,
        multi_round=False,
    )

    # Restore trained per-agent policies from the checkpoint directory
    policies = load_model(args.model_dir, env, device)

    print("\n开始评估...")
    if args.render:
        print("提示: 按 Ctrl+C 可以提前终止评估")

    # Allow the user to abort a (possibly rendered) evaluation cleanly
    try:
        evaluate(env, policies, args)
    except KeyboardInterrupt:
        print("\n\n评估被用户中断")

    print("\n评估完成！")


# Script entry point: only run the evaluation when executed directly.
if __name__ == "__main__":
    main()
