#!/usr/bin/env python3
"""
可视化验证训练好的PPO模型效果 - PyBullet 本地渲染版

功能：
1. 加载训练好的最佳模型
2. 使用 PyBullet 直接渲染环境
3. 显示每个回合的奖励和步数
4. 评估模型性能
"""

from __future__ import annotations

import logging
import os
import time
import numpy as np
import torch
from tensordict.nn import TensorDictModule
from tensordict.nn.distributions import NormalParamExtractor
from torch import nn
from torchrl.envs.utils import ExplorationType, set_exploration_type
from torchrl.modules import ProbabilisticActor, TanhNormal, ValueOperator
from tensordict import TensorDict

import pybullet as p
import pybullet_data
from pathlib import Path


# Logging configuration: timestamped INFO-level records for the whole script.
logging.basicConfig(level=logging.INFO, 
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("model_visualizer")

# Model hyperparameters (must match the values used during training)
num_cells = 32  # hidden-layer width of the actor MLP
device = torch.device("cpu")  # rendering/validation is typically run on CPU


class GymEnvGUI:
    """
    PyBullet-based robot-arm environment with optional GUI rendering.

    Joint 0: torque controlled (action in [-1, 1], scaled to
             [-max_torque, +max_torque]).
    Joint 1: passive (driven only by URDF damping/friction and gravity).
    Observation: [q0, q1, dq0, dq1] (joint positions and velocities).
    """

    def __init__(
        self,
        urdf_path: str = "urdfs/simple_arm.urdf",
        max_torque: float = 50.0,
        time_step: float = 1.0 / 100.0,
        frame_skip: int = 1,
        max_episode_steps: int = 2500,
        anchor_position: tuple = (0.0, 0.0, 1.05),
        render_mode: str = "gui",  # 'gui' or 'direct'
    ):
        """
        Args:
            urdf_path: path to the arm URDF file.
            max_torque: absolute bound of the torque applied at joint 0.
            time_step: physics integration step in seconds.
            frame_skip: number of simulation sub-steps per call to step().
            max_episode_steps: step budget before the episode is truncated.
            anchor_position: world-frame child anchor of the fixed base constraint.
            render_mode: 'gui' opens a PyBullet window, anything else is headless.
        """
        self.max_episode_steps = max_episode_steps
        self.current_step = 0  # per-episode step counter
        self.urdf_path = urdf_path
        self.max_torque = float(max_torque)
        self.time_step = float(time_step)
        self.frame_skip = int(frame_skip)
        self._anchor_position = tuple(anchor_position)

        # ---- Bullet connection ----
        self._cid = p.connect(p.GUI if render_mode == "gui" else p.DIRECT)
        p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)  # hide GUI widgets
        p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0)  # disable shadows for speed
        p.setTimeStep(self.time_step)
        p.setGravity(0, 0, -9.8)
        p.setAdditionalSearchPath(pybullet_data.getDataPath())

        # -1 marks "not created yet" body ids
        self.plane_id = -1
        self.robot_id = -1

        # Joint indices (per the URDF: 0=base_joint, 1=elbow_joint)
        self.joint_indices = (0, 1)

        self._build_world()

        # Initial camera placement
        p.resetDebugVisualizerCamera(
            cameraDistance=1.5,
            cameraYaw=30,
            cameraPitch=-30,
            cameraTargetPosition=[0, 0, 0.5]
        )

    # ---------- World building ----------
    def _build_world(self) -> None:
        """(Re)create the ground plane, the robot body and the base constraint."""
        if self.plane_id != -1:
            p.removeBody(self.plane_id)
            self.plane_id = -1
        if self.robot_id != -1:
            p.removeBody(self.robot_id)
            self.robot_id = -1

        plane_shape = p.createCollisionShape(p.GEOM_PLANE)
        self.plane_id = p.createMultiBody(
            baseMass=0, baseCollisionShapeIndex=plane_shape
        )

        if not Path(self.urdf_path).exists():
            raise FileNotFoundError(f"URDF not found: {self.urdf_path}")

        self.robot_id = p.loadURDF(self.urdf_path, [0, 0, 0.001], useFixedBase=False)

        # Release the joints (disable the default velocity motors) so that
        # TORQUE_CONTROL can take effect afterwards.
        for j in self.joint_indices:
            p.setJointMotorControl2(self.robot_id, j, p.VELOCITY_CONTROL, force=0.0)

        # Lightly pin the base to avoid numerical drift.
        p.createConstraint(
            parentBodyUniqueId=self.robot_id,
            parentLinkIndex=-1,
            childBodyUniqueId=-1,
            childLinkIndex=-1,
            jointType=p.JOINT_FIXED,
            jointAxis=[0, 0, 0],
            parentFramePosition=[0, 0, -0.0005],
            childFramePosition=list(self._anchor_position),
        )

    # ---------- Helpers ----------
    def _get_joint_state(self):
        """Return (q0, q1, dq0, dq1): positions and velocities of both joints."""
        assert self.robot_id != -1, "robot not loaded"
        s0 = p.getJointState(self.robot_id, self.joint_indices[0])
        s1 = p.getJointState(self.robot_id, self.joint_indices[1])
        q0, dq0 = float(s0[0]), float(s0[1])
        q1, dq1 = float(s1[0]), float(s1[1])
        return q0, q1, dq0, dq1

    def _get_ee_pos(self):
        """End-effector position: prefer the link whose child link is named
        'ball'; otherwise fall back to the elbow joint's child link."""
        assert self.robot_id != -1, "robot not loaded"
        ee_link_index = None
        n = p.getNumJoints(self.robot_id)
        for i in range(n):
            # jointInfo[12] is the child link name
            child_link_name = p.getJointInfo(self.robot_id, i)[12].decode("utf-8")
            if child_link_name == "ball":
                ee_link_index = i
                break
        if ee_link_index is None:
            ee_link_index = self.joint_indices[-1]

        # Index 4 of getLinkState is the world position of the URDF link frame.
        pos = p.getLinkState(
            self.robot_id, ee_link_index, computeForwardKinematics=True
        )[4]
        return float(pos[0]), float(pos[1]), float(pos[2])

    def _get_obs(self):
        """Observation vector [q0, q1, dq0, dq1] as float32."""
        q0, q1, dq0, dq1 = self._get_joint_state()
        return np.array([q0, q1, dq0, dq1], dtype=np.float32)

    # ---------- API ----------
    def reset(self, seed=None):
        """Reset the simulation and return the initial observation."""
        self.current_step = 0  # reset the step counter
        p.resetSimulation()
        p.setTimeStep(self.time_step)
        p.setGravity(0, 0, -9.8)
        p.setAdditionalSearchPath(pybullet_data.getDataPath())

        self.plane_id = -1
        self.robot_id = -1
        self._build_world()

        # Slightly randomized initial joint angles
        rng = np.random.default_rng(seed)
        for j in self.joint_indices:
            angle = float(rng.uniform(-0.05, 0.05))
            p.resetJointState(self.robot_id, j, targetValue=angle, targetVelocity=0.0)

        return self._get_obs()

    def step(self, action):
        """Apply *action* for frame_skip sub-steps.

        Args:
            action: array-like; element 0 is clipped to [-1, 1] and scaled
                to a torque for joint 0.

        Returns:
            (obs, reward, terminated, truncated)
        """
        assert self.robot_id != -1, "robot not loaded"

        self.current_step += 1

        a = float(np.clip(action[0], -1.0, 1.0))
        tau = a * self.max_torque

        # Joint 0: torque control (joint 1 stays passive).
        # BUGFIX: PyBullet applies TORQUE_CONTROL for a single simulation step
        # only, so the torque must be re-issued before every sub-step —
        # previously only the first of frame_skip sub-steps was actuated.
        for _ in range(self.frame_skip):
            p.setJointMotorControl2(
                self.robot_id, self.joint_indices[0], p.TORQUE_CONTROL, force=tau
            )
            p.stepSimulation()

        obs = self._get_obs()

        # Example reward (kept consistent with GymEnv)
        q0, q1, dq0, dq1 = map(float, obs[:4])
        reward = -abs(q1)**2
        terminated = False
        truncated = False
        if (
            not np.isfinite(obs).all()
            or abs(q0) > 10
            or abs(q1) > 0.5
            or self.current_step >= self.max_episode_steps
        ):
            truncated = True
            reward -= 100000.0  # penalty for abnormal termination

        return obs, reward, terminated, truncated

    def close(self):
        """Disconnect this environment's PyBullet client, if still connected."""
        # BUGFIX: check/disconnect our own client id instead of the default
        # connection, so multiple simultaneous clients are handled correctly.
        if self._cid >= 0 and p.isConnected(self._cid):
            p.disconnect(self._cid)
            self._cid = -1


def create_policy_module(action_low, action_high, obs_dim=4, action_dim=1):
    """Build a policy network with the same architecture used during training.

    Args:
        action_low: lower bound(s) of the action space (tensor).
        action_high: upper bound(s) of the action space (tensor).
        obs_dim: dimensionality of the observation vector.
        action_dim: number of action components (defaults to the single-torque
            setup used by this script; kept backward-compatible).

    Returns:
        A ProbabilisticActor mapping "observation" -> "action" through a
        TanhNormal distribution bounded to [action_low, action_high].
    """
    # Actor MLP producing (loc, scale) parameters for each action dimension.
    # Plain Linear (not LazyLinear) because the input size is known here.
    actor_net = nn.Sequential(
        nn.Linear(obs_dim, num_cells),
        nn.Tanh(),
        nn.Linear(num_cells, num_cells),
        nn.Tanh(),
        nn.Linear(num_cells, num_cells),
        nn.Tanh(),
        nn.Linear(num_cells, 2 * action_dim),  # loc and scale per action dim
        NormalParamExtractor(),
    )

    # Wrap the network so it reads/writes tensordict keys.
    td_module = TensorDictModule(
        actor_net, in_keys=["observation"], out_keys=["loc", "scale"]
    )

    # Probabilistic head: squashes a Normal through tanh into the action bounds.
    return ProbabilisticActor(
        module=td_module,
        spec=None,  # spec is unnecessary for pure inference
        in_keys=["loc", "scale"],
        distribution_class=TanhNormal,
        distribution_kwargs={
            "low": action_low,
            "high": action_high,
        },
        return_log_prob=False,  # log-probs are not needed at inference time
    )


def load_best_model(model_path, policy_module, map_location="cpu"):
    """Load the best training checkpoint into *policy_module* in place.

    Args:
        model_path: path to the checkpoint file; expected to be a dict with
            keys 'policy_state_dict', 'epoch' and 'eval_reward'.
        policy_module: module whose weights are restored.
        map_location: device to map stored tensors to (default CPU, matching
            the module-level `device` used by this script).

    Returns:
        The evaluation reward recorded in the checkpoint.

    Raises:
        FileNotFoundError: if *model_path* does not exist.
    """
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"模型文件未找到: {model_path}")

    checkpoint = torch.load(model_path, map_location=map_location)
    policy_module.load_state_dict(checkpoint['policy_state_dict'])

    # Lazy %-style args avoid building the message when INFO is disabled.
    logging.getLogger("model_visualizer").info(
        "✅ 成功加载最佳模型！(来自第%s次迭代, 评估奖励: %.4f)",
        checkpoint['epoch'], checkpoint['eval_reward'],
    )
    return checkpoint['eval_reward']


def run_episodes(env, policy_module, num_episodes=5, max_steps=1000, sleep_time=0.05):
    """Run several rendered evaluation episodes with a deterministic policy.

    Args:
        env: environment exposing reset()/step() (GymEnvGUI-style API).
        policy_module: TorchRL actor mapping "observation" -> "action".
        num_episodes: number of episodes to run.
        max_steps: hard per-episode step cap, independent of env truncation.
        sleep_time: delay between steps so the GUI rendering is watchable.

    Returns:
        (episode_rewards, episode_steps): per-episode reward totals and lengths.
    """
    episode_rewards = []
    episode_steps = []

    for episode in range(num_episodes):
        logger.info(f"\n{'='*60}\n回合 {episode+1}/{num_episodes} 开始\n{'='*60}")

        # Fresh episode with a random seed.
        obs = env.reset(seed=None)

        total_reward = 0.0
        steps = 0

        # DETERMINISTIC exploration: act with the distribution's mode, no sampling.
        with set_exploration_type(ExplorationType.DETERMINISTIC), torch.no_grad():
            done = False

            while not done and steps < max_steps:
                # Batch the observation and wrap it for the TensorDict policy.
                obs_tensor = torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
                tensordict = TensorDict({"observation": obs_tensor}, batch_size=[1])

                policy_out = policy_module(tensordict)

                # Ensure the action is at least 1-D for env.step indexing.
                action = np.atleast_1d(policy_out["action"].cpu().numpy())

                obs, reward, terminated, truncated = env.step(action)
                # Per-step trace at DEBUG level keeps stdout clean
                # (was a raw print on every step).
                logger.debug("obs=%s reward=%s", obs, reward)
                done = terminated or truncated
                if terminated:
                    logger.info("Episode terminated!")
                if truncated:
                    logger.info("Episode truncated!")

                total_reward += reward
                steps += 1

                # Small pause so the GUI renders at a watchable rate.
                time.sleep(sleep_time)

        episode_rewards.append(total_reward)
        episode_steps.append(steps)
        logger.info(f"回合 {episode+1} 结束: 总奖励={total_reward:.4f}, 步数={steps}")

    return episode_rewards, episode_steps


def main():
    """Entry point: load the trained policy and visualize it in PyBullet."""
    model_path = "logs/best_ppo_model.pt"

    # Action range - assumed to be [-1, 1].
    action_low = torch.tensor([-1.0])
    action_high = torch.tensor([1.0])

    # Policy with the same architecture as during training.
    policy_module = create_policy_module(action_low, action_high)

    try:
        # Load the best checkpoint (raises FileNotFoundError if absent).
        load_best_model(model_path, policy_module)

        # Make sure the logs folder exists.
        os.makedirs("logs", exist_ok=True)

        # Create the rendering environment.
        env = GymEnvGUI(
            urdf_path="urdfs/simple_arm.urdf", 
            render_mode="gui",
            time_step=1.0/100.0,  # tweak for smoother rendering
            max_episode_steps=2500
        )

        logger.info("✅ 已创建PyBullet GUI环境")

        try:
            # Run the evaluation episodes.
            num_episodes = 5
            episode_rewards, episode_steps = run_episodes(
                env, 
                policy_module, 
                num_episodes=num_episodes,
                max_steps=1000,
                sleep_time=0.01  # PyBullet GUI already paces its own rendering
            )

            # Summary statistics over all episodes.
            logger.info("\n" + "="*60)
            logger.info("📊 测试结果摘要:")
            logger.info(f"  测试回合数: {num_episodes}")
            logger.info(f"  平均奖励: {sum(episode_rewards)/len(episode_rewards):.4f} ± {np.std(episode_rewards):.4f}")
            logger.info(f"  平均步数: {sum(episode_steps)/len(episode_steps):.2f}")
            logger.info(f"  最大奖励: {max(episode_rewards):.4f}")
            logger.info(f"  最小奖励: {min(episode_rewards):.4f}")
            logger.info("="*60)
        finally:
            # BUGFIX: always release the PyBullet connection, even when
            # run_episodes raises (previously the GUI client leaked on error).
            env.close()

    except Exception as e:
        # logger.exception records the traceback in addition to the message.
        logger.exception(f"发生错误: {e}")
        raise


# Script entry point.
if __name__ == "__main__":
    main()