"""调试模型输出"""
import torch
import numpy as np
import json

from src.envs.overcooked.overcooked_env import Overcooked
from src.algo.r_mappo.rMAPPOPolicy import R_MAPPOPolicy

# Load the saved training configuration that matches the checkpoint.
model_dir = 'logs/results/Overcooked-v0/small_coor_2/rmappo/MACE-1028_163132-seed0/models/cp_900'
config_path = 'logs/results/Overcooked-v0/small_coor_2/rmappo/MACE-1028_163132-seed0/params.json'

with open(config_path, 'r') as f:
    saved_config = json.load(f)

# Build the environment with the layout the checkpoint was trained on.
# NOTE(review): max_timesteps/obs_type are hard-coded here — confirm they
# match the values stored in params.json.
env = Overcooked(layout_name='small_coor_2', max_timesteps=300, obs_type='vector', multi_round=False)
obs = env.reset()  # presumably a per-agent list of observation arrays — verify

print("初始观察形状:", [o.shape for o in obs])
print("第一个智能体的观察:", obs[0][:10])  # print the first 10 values

# Wrap the saved config dict so hyper-parameters read as attributes.
class Args:
    """Expose every entry of a config dict as an instance attribute."""

    def __init__(self, config):
        # Mirror all key/value pairs onto the instance in one call.
        self.__dict__.update(config)

args = Args(saved_config)  # hyper-parameters restored from the checkpoint's params.json

# Build the policy. The same observation space is passed for both the actor
# and the (centralized) critic input, and everything runs on CPU.
device = torch.device("cpu")
policy = R_MAPPOPolicy(args, env.observation_space[0], env.observation_space[0], env.action_space[0], device=device)

# Load the trained actor weights for agent 0 from the checkpoint directory.
actor_path = f'{model_dir}/actor_agent0.pt'
actor_state_dict = torch.load(actor_path, map_location=device)
policy.actor.load_state_dict(actor_state_dict)
policy.actor.eval()  # switch to inference mode (disables dropout/BN updates)

print("\n模型加载成功！")

# 准备输入
obs_tensor = torch.FloatTensor(obs[0]).unsqueeze(0)
rnn_state = torch.zeros((1, 1, 64))
mask = torch.ones((1, 1))

print("\n输入形状:")
print(f"  obs_tensor: {obs_tensor.shape}")
print(f"  rnn_state: {rnn_state.shape}")
print(f"  mask: {mask.shape}")

# Probe the actor's internals stage by stage (no gradients needed).
with torch.no_grad():
    # 1. Raw feature-extractor (actor base network) output.
    actor_features = policy.actor.base(obs_tensor)
    print(f"\n1. Actor base features 形状: {actor_features.shape}")
    print(f"   前10个值: {actor_features[0, :10]}")

    # 2. Optionally pass the features through the recurrent layer,
    # mirroring the actor's own forward path.
    if policy.actor._use_naive_recurrent_policy or policy.actor._use_recurrent_policy:
        actor_features, rnn_states_out = policy.actor.rnn(actor_features, rnn_state, mask)
        print(f"\n2. 经过 RNN 后的特征形状: {actor_features.shape}")
        print(f"   前10个值: {actor_features[0, :10]}")

    # 3. Action-distribution head.
    # NOTE(review): reading `.probs` below assumes a Categorical-style
    # distribution — verify for multi-discrete/continuous action spaces.
    action_out = policy.actor.act.action_out(actor_features)
    print(f"\n3. Action distribution: {action_out}")
    print(f"   类型: {type(action_out)}")
    # Extract the per-action probabilities from the distribution.
    action_probs = action_out.probs
    print(f"   Action probabilities: {action_probs}")
    print(f"   最可能的动作: {torch.argmax(action_probs, dim=-1)}")

    # 4. End-to-end policy.act() call, greedy action selection.
    action, rnn_state_new = policy.act(obs_tensor, rnn_state, mask, deterministic=True)
    print(f"\n4. policy.act() 输出:")
    print(f"   Action: {action}")
    print(f"   Action value: {action.item()}")

    # 5. Same call with stochastic sampling, for comparison.
    action_sample, rnn_state_sample = policy.act(obs_tensor, rnn_state, mask, deterministic=False)
    print(f"\n5. policy.act(deterministic=False) 输出:")
    print(f"   Sampled action: {action_sample.item()}")

print("\n运行几步看看:")
for step in range(5):
    # 获取动作
    actions = []
    for agent_id in range(2):
        obs_tensor = torch.FloatTensor(obs[agent_id]).unsqueeze(0)
        rnn_state = torch.zeros((1, 1, 64))
        mask = torch.ones((1, 1))

        with torch.no_grad():
            action, _ = policy.act(obs_tensor, rnn_state, mask, deterministic=True)
        actions.append(int(action.item()))

    print(f"步骤 {step + 1}: 动作={actions}")

    obs, rewards, dones, _ = env.step(actions)
    print(f"        奖励={rewards[0][0]}")

    if dones[0]:
        break
