# # train_ppo_torchrl.py
# # NOTE(review): this ENTIRE file is commented out (dead code). If it is kept
# # only for reference, prefer deleting it — version control preserves history;
# # if it is meant to run, it must be uncommented and the NOTEs below addressed.
# import os
# import math
# import time
# from typing import Callable

# import torch
# import torch.nn as nn
# from torch.optim import Adam

# from torchrl.envs import set_gym_backend
# set_gym_backend("gymnasium")  # 很重要：Gymnasium 而不是旧 gym

# from torchrl.envs.libs.gym import GymEnv
# from torchrl.envs import EnvCreator, TransformedEnv, Compose, StepCounter
# from torchrl.collectors import SyncDataCollector

# from torchrl.modules import MLP, ProbabilisticActor, ValueOperator, NormalParamWrapper
# from torchrl.modules.distributions import TanhNormal
# from torchrl.objectives import ClipPPOLoss
# from torchrl.objectives.value import GAE

# from tensordict.nn import TensorDictModule

# import numpy as np

# # 你的自定义环境
# from simple_arm_env import SimpleArmEnv

# # --------------- 可调超参 --------------- #
# DEVICE            = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# SEED              = 42
# NUM_WORKERS       = 4         # sampling workers; NOTE(review): value (4) contradicts the old note ("use 1 for simplicity"), and it is never used below — SyncDataCollector runs a single env
# FRAMES_PER_BATCH  = 2048       # 每批采样步数（num_workers * 每worker步数）
# TOTAL_FRAMES      = 200_000    # 训练总步数
# MINI_BATCH_SIZE   = 256        # PPO 小批量
# PPO_EPOCHS        = 8          # 每个 batch 做几次 PPO 更新
# GAMMA             = 0.99
# LAMBDA            = 0.95       # GAE
# CLIP_EPS          = 0.2
# ENTROPY_COEF      = 0.0
# VALUE_COEF        = 0.5
# LR                = 3e-4
# MAX_GRAD_NORM     = 1.0

# MAX_TORQUE        = 15.0       # 对应你的 env.max_torque
# TIME_STEP         = 1.0/240.0  # 对应 env.time_step
# FRAME_SKIP        = 1
# # --------------------------------------- #


# def make_env(render_mode=None) -> TransformedEnv:
#     """创建一个 GymEnv 封装的 SimpleArmEnv，并加上计步 Transform。"""
#     # GymEnv 接受 gymnasium 环境实例
#     base = GymEnv(
#         env=SimpleArmEnv(
#             urdf_path="./urdf/simple_arm.urdf",
#             render_mode=render_mode,         # 训练时 None，评估时 "human"
#             max_torque=MAX_TORQUE,
#             time_step=TIME_STEP,
#             frame_skip=FRAME_SKIP,
#         ),
#         device=DEVICE,
#     )
#     # 可选 Transform：计步，避免无限长
#     env = TransformedEnv(base, Compose(StepCounter(max_steps=2500)))
#     return env


# def make_actor_critic(obs_dim: int, action_dim: int):
#     """构建策略与价值网络（TorchRL 风格）"""
#     # ---- 策略网络：MLP -> (mu, log_std) -> TanhNormal ----
#     policy_net = MLP(
#         in_features=obs_dim,
#         out_features=2 * action_dim,    # 高斯两个参数
#         num_cells=[128, 128],
#         activation_class=nn.Tanh,       # Tanh 激活，稳定些
#     )
#     # Split the MLP output into (loc, scale).
#     # NOTE(review): NormalParamWrapper does not accept a `scale_ub` kwarg (its
#     # signature is roughly (operator, scale_mapping=..., scale_lb=...)), and it
#     # is deprecated in recent TorchRL in favor of
#     # tensordict.nn.NormalParamExtractor — verify against the installed version
#     # before reviving this code.
#     policy_param = NormalParamWrapper(policy_net, scale_lb=0.1, scale_ub=2.0)

#     # 构成 TensorDictModule：输入 'observation' -> 输出 'loc','scale'
#     policy_module = TensorDictModule(
#         policy_param, in_keys=["observation"], out_keys=["loc", "scale"]
#     )

#     # 概率 Actor：TanhNormal 分布，输出 'action'
#     actor = ProbabilisticActor(
#         module=policy_module,
#         in_keys=["loc", "scale"],
#         out_keys=["action"],
#         distribution_class=TanhNormal,
#         distribution_kwargs={"low": -1.0, "high": 1.0},  # 动作范围
#         return_log_prob=True,  # 训练时需要 log_prob
#     ).to(DEVICE)

#     # ---- 值函数 ----
#     value_net = MLP(
#         in_features=obs_dim, out_features=1, num_cells=[128, 128], activation_class=nn.Tanh
#     )
#     critic = ValueOperator(
#         module=value_net, in_keys=["observation"], out_keys=["state_value"]
#     ).to(DEVICE)

#     return actor, critic


# @torch.no_grad()
# def infer_obs_action_dims(tmp_env) -> tuple[int, int]:
#     td = tmp_env.reset()
#     obs = td["observation"]
#     obs_dim = obs.numel()
#     # 动作维度从 space 取更稳妥
#     action_dim = int(np.prod(tmp_env.action_spec.shape))
#     return obs_dim, action_dim


# def main():
#     torch.manual_seed(SEED)
#     np.random.seed(SEED)

#     # -------- 环境与维度 -------- #
#     # 采集器自己会创建 env，这里只用一个 env 推断维度
#     tmp_env = make_env(render_mode=None)
#     obs_dim, action_dim = infer_obs_action_dims(tmp_env)
#     tmp_env.close()

#     actor, critic = make_actor_critic(obs_dim, action_dim)
#     optim = Adam(list(actor.parameters()) + list(critic.parameters()), lr=LR)

#     # -------- 损失与 GAE -------- #
#     loss_module = ClipPPOLoss(
#         actor=actor,
#         critic=critic,
#         clip_epsilon=CLIP_EPS,
#         entropy_coef=ENTROPY_COEF,
#         critic_coef=VALUE_COEF,
#         gamma=GAMMA,
#         # NOTE(review): recent TorchRL versions removed the `gamma`/`gae_lambda`
#         # kwargs from ClipPPOLoss; GAE is configured instead via
#         # loss_module.make_value_estimator(ValueEstimators.GAE, gamma=..., lmbda=...)
#         # — confirm against the installed torchrl version before reviving.
#         gae_lambda=LAMBDA,
#         # TorchRL uses default keys: reward='reward', done/truncated='done',
#         # value='state_value', advantage='advantage'; keep the defaults.
#     )

#     # 或手动：
#     # gae = GAE(gamma=GAMMA, lmbda=LAMBDA, value_network=critic)

#     # -------- 采集器 -------- #
#     # EnvCreator 确保每个 worker 独立创建 env
#     create_env_fn = EnvCreator(lambda: make_env(render_mode=None))
#     collector = SyncDataCollector(
#         create_env_fn=create_env_fn,
#         policy=actor,  # 采样时用 actor（其内部会随机采样动作）
#         frames_per_batch=FRAMES_PER_BATCH,
#         total_frames=TOTAL_FRAMES,
#         device=DEVICE,
#         storing_device=DEVICE,
#         init_random_frames=0,
#         reset_at_each_iter=False,
#     )

#     # -------- 训练循环 -------- #
#     frames = 0
#     log_every = 10
#     it = 0
#     start = time.time()

#     for tensordict_data in collector:
#         # tensordict_data 包含：observation, action, reward, done, next, log_prob, 等
#         frames += tensordict_data.numel()

#         # 计算损失（PPO 内部会根据 current policy & old log_prob 算 ratio）
#         # 1）计算 critic 的当前 value & advantage（如果没让 PPO 内部算 GAE，这里可先 gae）
#         # loss_module 会自动处理 'advantage' 和 'value_target' 的计算（当传了 gae_lambda）
#         # 2）多轮 epoch + 小批量 PPO 更新
#         for _ in range(PPO_EPOCHS):
#             # Split into minibatches. NOTE(review): the original comment said
#             # "shuffle and split", but .split() does NOT shuffle — proper PPO
#             # minibatching should permute indices before splitting.
#             for mb in tensordict_data.split(MINI_BATCH_SIZE, dim=0, drop_last=True):
#                 loss_vals = loss_module(mb)
#                 loss = loss_vals["loss_objective"] + loss_vals["loss_critic"] + loss_vals["loss_entropy"]

#                 optim.zero_grad(set_to_none=True)
#                 loss.backward()
#                 nn.utils.clip_grad_norm_(list(actor.parameters()) + list(critic.parameters()), MAX_GRAD_NORM)
#                 optim.step()

#         it += 1
#         if it % log_every == 0:
#             # Simple logging. NOTE(review): TorchRL collectors typically store
#             # the reward at ("next", "reward"), so tensordict_data["reward"]
#             # may raise a KeyError — verify against the installed version.
#             ep_rew = tensordict_data["reward"].mean().item()
#             elapsed = time.time() - start
#             print(f"[{frames:>8d} frames] mean_step_reward={ep_rew:+.4f}  fps≈{int(frames/elapsed)}")

#         if frames >= TOTAL_FRAMES:
#             break

#     collector.shutdown()
#     # 可选：保存策略
#     torch.save({"actor": actor.state_dict(), "critic": critic.state_dict()}, "ppo_simple_arm.pt")
#     print("Training done. Saved to ppo_simple_arm.pt")


# if __name__ == "__main__":
#     main()
