#!/usr/bin/env python3
"""
可视化验证训练好的PPO模型效果

功能：
1. 加载训练好的最佳模型
2. 使用渲染客户端与环境交互
3. 显示每个回合的奖励和步数
4. 绘制性能图表
"""

from __future__ import annotations

import logging
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
from tensordict.nn import TensorDictModule
from tensordict.nn.distributions import NormalParamExtractor
from torch import nn
from torchrl.envs.utils import ExplorationType, set_exploration_type
from torchrl.modules import ProbabilisticActor, TanhNormal, ValueOperator
from tensordict import TensorDict  # 添加这一行

from parnassus.clients.arm_stream.render_client import RenderClient


# Configure root logging: timestamp, logger name, level, message.
logging.basicConfig(level=logging.INFO, 
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("model_visualizer")

# Model hyperparameters — must match the values used at training time.
num_cells = 32  # hidden width of each MLP layer in the actor network
device = torch.device("cpu")  # rendering/validation typically runs on CPU


def create_policy_module(action_low, action_high, obs_dim=4, action_dim=1):
    """Build a policy actor with the same architecture used at training time.

    Args:
        action_low: lower bound(s) of the action space (tensor-like).
        action_high: upper bound(s) of the action space (tensor-like).
        obs_dim: dimensionality of the observation vector. Defaults to 4.
        action_dim: number of action components. Defaults to 1, matching
            the previously hard-coded single-action setup, so existing
            callers are unaffected.

    Returns:
        A ``ProbabilisticActor`` mapping an "observation" entry to an
        "action" entry via a TanhNormal distribution bounded by
        ``action_low``/``action_high``.
    """
    # Actor MLP. Plain Linear (not LazyLinear) because the input dimension
    # is known up front. The final layer emits 2 values per action
    # component (loc and scale), which NormalParamExtractor splits.
    actor_net = nn.Sequential(
        nn.Linear(obs_dim, num_cells),
        nn.Tanh(),
        nn.Linear(num_cells, num_cells),
        nn.Tanh(),
        nn.Linear(num_cells, num_cells),
        nn.Tanh(),
        nn.Linear(num_cells, 2 * action_dim),
        NormalParamExtractor(),
    )

    # Wrap the raw network so it reads/writes tensordict keys.
    policy_module = TensorDictModule(
        actor_net, in_keys=["observation"], out_keys=["loc", "scale"]
    )

    # Wrap as a probabilistic actor. ``spec`` is omitted because this
    # module is used for inference only, not for spec validation.
    policy_module = ProbabilisticActor(
        module=policy_module,
        spec=None,
        in_keys=["loc", "scale"],
        distribution_class=TanhNormal,
        distribution_kwargs={
            "low": action_low,
            "high": action_high,
        },
        return_log_prob=False,  # log-probabilities are not needed at inference time
    )
    
    return policy_module


def load_best_model(model_path, policy_module):
    """Restore the best checkpoint into ``policy_module``.

    Args:
        model_path: path to a ``torch.save``-d checkpoint containing the
            keys "policy_state_dict", "epoch" and "eval_reward".
        policy_module: module whose weights are restored in place.

    Returns:
        The checkpoint's stored evaluation reward.

    Raises:
        FileNotFoundError: if ``model_path`` does not exist.
    """
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model file not found: {model_path}")

    checkpoint = torch.load(model_path, map_location=device)
    policy_module.load_state_dict(checkpoint["policy_state_dict"])

    epoch = checkpoint["epoch"]
    eval_reward = checkpoint["eval_reward"]
    logger.info(f"✅ 成功加载最佳模型！(来自第{epoch}次迭代, 评估奖励: {eval_reward:.4f})")
    return eval_reward


def convert_to_numpy(observation):
    """Convert a client observation to a float32 numpy array.

    Previously an input that was already an ``ndarray`` was returned
    unchanged, so e.g. an int64 array kept its dtype and downstream
    tensor construction saw inconsistent dtypes. ``np.asarray`` with an
    explicit dtype normalizes every input to float32 and avoids a copy
    when the input already conforms.

    Args:
        observation: array-like observation returned by the client.

    Returns:
        ``np.ndarray`` with dtype float32.
    """
    return np.asarray(observation, dtype=np.float32)


def run_episodes(client, policy_module, num_episodes=5, max_steps=1000):
    """Run several rendered episodes with the deterministic policy.

    Args:
        client: render client exposing ``reset(seed=...)`` and ``step(action)``;
            ``step`` is expected to return a mapping with "reward",
            "terminated", "truncated" and "observation" keys (see usage below).
        policy_module: trained actor that produces an "action" entry from an
            "observation" entry in a TensorDict.
        num_episodes: number of episodes to run.
        max_steps: hard cap on steps per episode.

    Returns:
        Tuple ``(episode_rewards, episode_steps)`` of per-episode totals.
    """
    episode_rewards = []
    episode_steps = []
    
    for episode in range(num_episodes):
        logger.info(f"\n{'='*60}\n回合 {episode+1}/{num_episodes} 开始\n{'='*60}")
        
        # Reset the environment; no fixed seed -> random initial state.
        obs = client.reset(seed=None)
        obs_np = convert_to_numpy(obs)
        
        # Per-episode accumulators.
        total_reward = 0.0
        steps = 0
        
        # Deterministic exploration: take the distribution's mode instead of sampling.
        with set_exploration_type(ExplorationType.DETERMINISTIC), torch.no_grad():
            done = False
            
            while not done and steps < max_steps:
                # Convert the observation to a batched tensor (batch size 1).
                obs_tensor = torch.tensor(obs_np, dtype=torch.float32).unsqueeze(0)
                
                # Wrap in a TensorDict as expected by the policy module.
                tensordict = TensorDict({"observation": obs_tensor}, batch_size=[1])
                
                # Query the policy for an action.
                policy_out = policy_module(tensordict)
                
                # Ensure the action is at least 1-D before sending it to the client.
                action = policy_out["action"].cpu().numpy()
                action = np.atleast_1d(action)
                
                # Optional per-step debug output.
                logger.debug(f"动作形状: {action.shape}, 值: {action}")
                
                # Apply the action in the environment.
                reply = client.step(action)
                
                # Unpack reward, termination flags and the next observation.
                reward = float(reply["reward"])
                done = bool(reply["terminated"]) or bool(reply["truncated"])
                obs_np = convert_to_numpy(reply["observation"])
                
                # Update episode statistics.
                total_reward += reward
                steps += 1
                
                # Short pause so the rendering is observable.
                time.sleep(0.01)
            
        # Record episode results.
        episode_rewards.append(total_reward)
        episode_steps.append(steps)
        logger.info(f"回合 {episode+1} 结束: 总奖励={total_reward:.4f}, 步数={steps}")
    
    return episode_rewards, episode_steps

def plot_results(episode_rewards, episode_steps, best_eval_reward):
    """Plot per-episode rewards and step counts, save and show the figure.

    Args:
        episode_rewards: per-episode cumulative rewards.
        episode_steps: per-episode step counts.
        best_eval_reward: best evaluation reward observed during training,
            drawn as a reference line on the reward chart.

    Side effects:
        Saves the figure to ``logs/model_test_results.png`` (the caller is
        expected to have created ``logs/``) and opens an interactive window.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))

    # Hoist the averages instead of recomputing sum/len for every use.
    mean_reward = sum(episode_rewards) / len(episode_rewards)
    mean_steps = sum(episode_steps) / len(episode_steps)
    episodes = range(1, len(episode_rewards) + 1)

    # Reward bar chart with mean and training-best reference lines.
    ax1.bar(episodes, episode_rewards, color='skyblue')
    ax1.axhline(y=mean_reward, color='red',
                linestyle='--', label=f'平均: {mean_reward:.2f}')
    ax1.axhline(y=best_eval_reward, color='green',
                linestyle='-.', label=f'训练最佳: {best_eval_reward:.2f}')
    ax1.set_xlabel('回合')
    ax1.set_ylabel('累计奖励')
    ax1.set_title('测试回合奖励')
    ax1.set_xticks(episodes)
    ax1.grid(True, alpha=0.3)
    ax1.legend()

    # Step-count bar chart with a mean reference line.
    ax2.bar(episodes, episode_steps, color='lightgreen')
    ax2.axhline(y=mean_steps, color='red',
                linestyle='--', label=f'平均: {mean_steps:.2f}')
    ax2.set_xlabel('回合')
    ax2.set_ylabel('步数')
    ax2.set_title('测试回合步数')
    ax2.set_xticks(episodes)
    ax2.grid(True, alpha=0.3)
    ax2.legend()

    plt.tight_layout()
    plt.savefig("logs/model_test_results.png", dpi=150)
    # Plain string: the original used an f-string with no placeholders.
    logger.info("📊 测试结果图表已保存至 logs/model_test_results.png")
    plt.show()


def main():
    """Load the best checkpoint, run rendered test episodes, and plot results."""
    model_path = "logs/best_ppo_model.pt"

    # Action bounds — assumed to be [-1, 1].
    action_low = torch.tensor([-1.0])
    action_high = torch.tensor([1.0])

    # Build the policy with the training-time architecture, then restore weights.
    policy_module = create_policy_module(action_low, action_high)
    best_eval_reward = load_best_model(model_path, policy_module)

    # Make sure the output folder exists before any plots are saved.
    os.makedirs("logs", exist_ok=True)

    try:
        # Connect to the render client and run the evaluation episodes.
        with RenderClient("localhost:50051") as client:
            logger.info("✅ 已连接到渲染客户端")

            num_episodes = 5  # adjust as needed
            episode_rewards, episode_steps = run_episodes(
                client, policy_module, num_episodes=num_episodes
            )

            avg_reward = sum(episode_rewards) / len(episode_rewards)
            avg_steps = sum(episode_steps) / len(episode_steps)

            # Summary statistics for the whole test run.
            logger.info("\n" + "="*60)
            logger.info("📊 测试结果摘要:")
            logger.info(f"  测试回合数: {num_episodes}")
            logger.info(f"  平均奖励: {avg_reward:.4f} ± {np.std(episode_rewards):.4f}")
            logger.info(f"  平均步数: {avg_steps:.2f}")
            logger.info(f"  最大奖励: {max(episode_rewards):.4f}")
            logger.info(f"  最小奖励: {min(episode_rewards):.4f}")
            logger.info("="*60)

            plot_results(episode_rewards, episode_steps, best_eval_reward)

    except Exception as e:
        logger.error(f"发生错误: {e}")
        raise


# Script entry point.
if __name__ == "__main__":
    main()