from collections import defaultdict
import argparse
import os

import matplotlib.pyplot as plt
import torch
from tensordict.nn import TensorDictModule
from tensordict.nn.distributions import NormalParamExtractor
from torch import nn

from torchrl.collectors import SyncDataCollector
from torchrl.data.replay_buffers import ReplayBuffer
from torchrl.data.replay_buffers.samplers import SamplerWithoutReplacement
from torchrl.data.replay_buffers.storages import LazyTensorStorage
from torchrl.envs import (
    Compose,
    DoubleToFloat,
    ObservationNorm,
    StepCounter,
    TransformedEnv,
)
from torchrl.envs.libs.gym import GymEnv
from torchrl.envs.utils import check_env_specs, ExplorationType, set_exploration_type
from torchrl.modules import ProbabilisticActor, TanhNormal, ValueOperator
from torchrl.objectives import ClipPPOLoss
from torchrl.objectives.value import GAE
from tqdm import tqdm

import multiprocessing

from parnassus.envs.arm.torchrl_env import ArmTorchRLEnv


# ============ Command-line argument parsing ============
# NOTE: --load-model defaults to the best-model path written by a previous
# run, so by default training resumes from logs/best_ppo_model.pt if present.
parser = argparse.ArgumentParser(description='Train PPO agent with optional pretrained model')
parser.add_argument('--load-model', type=str, default="logs/best_ppo_model.pt", metavar='PATH',
                    help='path to pretrained model checkpoint (default: logs/best_ppo_model.pt)')
parser.add_argument('--lr', type=float, default=0.0003,
                    help='learning rate (default: 3e-4)')
parser.add_argument('--frames-per-batch', type=int, default=5000,
                    help='frames per batch (default: 5000)')
parser.add_argument('--total-frames', type=int, default=100_000,
                    help='total training frames (default: 100000)')
parser.add_argument('--save-dir', type=str, default='logs',
                    help='directory to save models (default: logs)')


args = parser.parse_args()


# ---- Hyperparameters ----
# A forked child process cannot reinitialize CUDA, so when the
# multiprocessing start method is "fork" we stay on CPU.
is_fork = multiprocessing.get_start_method() == "fork"
if torch.cuda.is_available() and not is_fork:
    device = torch.device(0)
else:
    device = torch.device("cpu")

num_cells = 32        # width (output dim) of every hidden layer
lr = args.lr          # learning rate taken from the command line
max_grad_norm = 1.0   # gradient-norm clipping threshold

# ---- Data collection budget ----
frames_per_batch = args.frames_per_batch  # transitions per collector batch
total_frames = args.total_frames          # total transitions for the whole run

# ---- PPO settings ----
sub_batch_size = 256  # minibatch size for each optimization step
num_epochs = 10       # optimization passes over each collected batch
clip_epsilon = 0.2    # PPO surrogate clipping range
gamma = 0.99          # discount factor
lmbda = 0.95          # GAE lambda
entropy_eps = 1e-4    # entropy bonus coefficient

# ---- Environment setup ----
# base_env = GymEnv("InvertedDoublePendulum-v4", device=device)
# Project-specific robot-arm environment served over gRPC at the given address.
base_env = ArmTorchRLEnv(address="localhost:50051", device=device)

env = TransformedEnv(
    base_env,
    Compose(
        # normalize observations
        ObservationNorm(in_keys=["observation"]),
        DoubleToFloat(),
        StepCounter(),
    ),
)
# Estimate the observation-normalization statistics (loc/scale) by stepping
# the real environment for 1000 iterations. NOTE(review): this talks to the
# gRPC server, so it requires the arm service to be running.
env.transform[0].init_stats(num_iter=1000, reduce_dim=0, cat_dim=0)

print("normalization constant shape:", env.transform[0].loc.shape)
# check_env_specs(env)
# rollout = env.rollout(3)
# print("rollout of three steps:", rollout)
# print("Shape of the rollout TensorDict:", rollout.batch_size)

# ---- Policy network ----
# Three Tanh-activated lazy-linear hidden layers, then a head producing
# 2 * action_dim values that NormalParamExtractor splits into (loc, scale).
_actor_layers = []
for _ in range(3):
    _actor_layers.append(nn.LazyLinear(num_cells, device=device))
    _actor_layers.append(nn.Tanh())
_actor_layers.append(nn.LazyLinear(2 * env.action_spec.shape[-1], device=device))
_actor_layers.append(NormalParamExtractor())
actor_net = nn.Sequential(*_actor_layers)

# Map the observation through the net to the distribution parameters.
policy_module = TensorDictModule(
    actor_net, in_keys=["observation"], out_keys=["loc", "scale"]
)

# Wrap the parameter producer in a probabilistic actor that samples a
# TanhNormal action bounded by the environment's action spec.
policy_module = ProbabilisticActor(
    module=policy_module,
    spec=env.action_spec,
    in_keys=["loc", "scale"],
    distribution_class=TanhNormal,
    distribution_kwargs={
        "low": env.action_spec_unbatched.space.low,
        "high": env.action_spec_unbatched.space.high,
    },
    # the log-prob is needed for the numerator of the importance weights
    return_log_prob=True,
)

# ---- Value (critic) network ----
# Same trunk shape as the actor, but with a single scalar state-value output.
_value_layers = []
for _ in range(3):
    _value_layers += [nn.LazyLinear(num_cells, device=device), nn.Tanh()]
_value_layers.append(nn.LazyLinear(1, device=device))
value_net = nn.Sequential(*_value_layers)

# Reads "observation" and writes the state value under the default key.
value_module = ValueOperator(
    module=value_net,
    in_keys=["observation"],
)

# ---- Data collector ----
# Synchronously rolls the current policy in the env, yielding batches of
# frames_per_batch transitions until total_frames have been collected.
collector = SyncDataCollector(
    env,
    policy_module,
    frames_per_batch=frames_per_batch,
    total_frames=total_frames,
    split_trajs=False,
    device=device,
)
# Replay buffer sized to hold exactly one collection batch; sampling without
# replacement so each transition is used once per pass over the buffer.
replay_buffer = ReplayBuffer(
    storage=LazyTensorStorage(max_size=frames_per_batch),
    sampler=SamplerWithoutReplacement(),
)

# ---- Lazy-layer initialization, loss, and optimizer ----
# The networks use LazyLinear, so run one forward pass with real reset data
# to materialize all weights BEFORE handing parameters to the loss/optimizer.

# Initialize the networks with a dummy forward pass.
with torch.no_grad():
    # a reset tensordict has the same structure as a live observation
    dummy_data = env.reset()

    # materialize the policy network's lazy layers
    policy_module(dummy_data)

    # materialize the value network's lazy layers
    value_module(dummy_data)


# Generalized Advantage Estimation; re-invoked every epoch in the training
# loop because it depends on the (updated) value network.
advantage_module = GAE(
    gamma=gamma, lmbda=lmbda, value_network=value_module, average_gae=True
)

loss_module = ClipPPOLoss(
    actor_network=policy_module,
    critic_network=value_module,
    clip_epsilon=clip_epsilon,
    entropy_bonus=bool(entropy_eps),
    entropy_coeff=entropy_eps,
    # these keys match by default but we set this for completeness
    critic_coeff=1.0,
    loss_critic_type="smooth_l1",
)

optim = torch.optim.Adam(loss_module.parameters(), lr)
# Cosine-anneal the lr to 0 over the total number of collection iterations.
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optim, total_frames // frames_per_batch, 0.0
)


# ============ Load a pretrained model (if specified) ============
# NOTE(review): start_iteration is computed here but never consumed by the
# training loop below, and the lr scheduler is not fast-forwarded — resuming
# restarts the cosine schedule from its beginning. Confirm whether resume
# should also skip collector iterations / advance the scheduler.
start_iteration = 0
if args.load_model is not None:
    if not os.path.exists(args.load_model):
        raise FileNotFoundError(f"Pretrained model not found: {args.load_model}")

    print(f"\n{'='*60}")
    print(f"Loading pretrained model from: {args.load_model}")
    print(f"{'='*60}")

    checkpoint = torch.load(args.load_model, map_location=device)

    # Restore network weights.
    policy_module.load_state_dict(checkpoint['policy_state_dict'])
    value_module.load_state_dict(checkpoint['value_state_dict'])

    # Restore the optimizer state (best effort, optional).
    if 'optimizer_state_dict' in checkpoint:
        try:
            optim.load_state_dict(checkpoint['optimizer_state_dict'])
            print("✅ Loaded optimizer state")
            # Override the restored lr with the value from the command line.
            for param_group in optim.param_groups:
                param_group['lr'] = args.lr
            print(f"✅ Reset learning rate to {args.lr}")

        except Exception as e:
            print(f"⚠️  Could not load optimizer state: {e}")

    # Restore the training-progress marker (informational only; see NOTE).
    if 'epoch' in checkpoint:
        start_iteration = checkpoint['epoch'] + 1
        print(f"✅ Resuming from iteration {start_iteration}")

    if 'eval_reward' in checkpoint:
        print(f"✅ Previous best eval reward: {checkpoint['eval_reward']:.4f}")

    print(f"{'='*60}\n")





# ---- Training bookkeeping ----

# Make sure the model output directory exists.
os.makedirs(args.save_dir, exist_ok=True)

logs = defaultdict(list)
pbar = tqdm(total=total_frames, dynamic_ncols=True)
eval_str = ""

# Track the best evaluation reward so far for best-model checkpointing.
best_eval_reward = float('-inf')
best_model_path = os.path.join(args.save_dir, "best_ppo_model.pt")

# When resuming from a pretrained checkpoint, also restore the best eval
# reward so an early (worse) evaluation cannot overwrite the best model.
if args.load_model is not None:
    try:
        checkpoint = torch.load(args.load_model, map_location=device)
        if 'eval_reward' in checkpoint:
            best_eval_reward = checkpoint['eval_reward']
            print(f"Restored best eval reward: {best_eval_reward:.4f}")
    except (OSError, KeyError, RuntimeError) as e:
        # Best effort: keep -inf as the baseline, but report why the restore
        # failed instead of swallowing the error silently (old bare `except`
        # also caught KeyboardInterrupt/SystemExit and hid real bugs).
        print(f"⚠️  Could not restore best eval reward: {e}")

# We iterate over the collector until it reaches the total number of frames it was
# designed to collect:
for i, tensordict_data in enumerate(collector):
    # we now have a batch of data to work with. Let's learn something from it.
    for _ in range(num_epochs):
        # We'll need an "advantage" signal to make PPO work.
        # We re-compute it at each epoch as its value depends on the value
        # network which is updated in the inner loop.
        advantage_module(tensordict_data)
        data_view = tensordict_data.reshape(-1)
        replay_buffer.extend(data_view.cpu())
        for _ in range(frames_per_batch // sub_batch_size):
            subdata = replay_buffer.sample(sub_batch_size)
            loss_vals = loss_module(subdata.to(device))
            # Total PPO loss: clipped surrogate + critic + entropy terms.
            loss_value = (
                loss_vals["loss_objective"]
                + loss_vals["loss_critic"]
                + loss_vals["loss_entropy"]
            )

            # Optimization: backward, grad clipping and optimization step
            loss_value.backward()
            # this is not strictly mandatory but it's good practice to keep
            # your gradient norm bounded
            torch.nn.utils.clip_grad_norm_(loss_module.parameters(), max_grad_norm)
            optim.step()
            optim.zero_grad()

    # Bookkeeping for the progress bar and the final plots.
    logs["reward"].append(tensordict_data["next", "reward"].mean().item())
    cum_reward_str = (
        f"average reward={logs['reward'][-1]: 4.4f} (init={logs['reward'][0]: 4.4f})"
    )
    logs["step_count"].append(tensordict_data["step_count"].max().item())
    stepcount_str = f"step count (max): {logs['step_count'][-1]}"
    logs["lr"].append(optim.param_groups[0]["lr"])
    lr_str = f"lr policy: {logs['lr'][-1]: 4.10f}"

    # Update the progress bar once per collected batch.
    pbar.update(tensordict_data.numel())
    pbar.set_description(", ".join([cum_reward_str, stepcount_str, lr_str]))
    pbar.refresh()

    if i % 10 == 0:
        # We evaluate the policy once every 10 batches of data.
        # Evaluation is rather simple: execute the policy without exploration
        # (take the expected value of the action distribution) for a given
        # number of steps (1000, which is our ``env`` horizon).
        # The ``rollout`` method of the ``env`` can take a policy as argument:
        # it will then execute this policy at each step.
        with set_exploration_type(ExplorationType.DETERMINISTIC), torch.no_grad():
            # execute a rollout with the trained policy
            eval_rollout = env.rollout(1000, policy_module)
            logs["eval reward"].append(eval_rollout["next", "reward"].mean().item())
            logs["eval reward (sum)"].append(
                eval_rollout["next", "reward"].sum().item()
            )
            logs["eval step_count"].append(eval_rollout["step_count"].max().item())
            eval_str = (
                f"eval cumulative reward: {logs['eval reward (sum)'][-1]: 4.4f} "
                f"(init: {logs['eval reward (sum)'][0]: 4.4f}), "
                f"eval step-count: {logs['eval step_count'][-1]}"
            )

            # Save the best model whenever evaluation improves on the record.
            current_eval_reward = logs['eval reward (sum)'][-1]
            if current_eval_reward > best_eval_reward:
                best_eval_reward = current_eval_reward
                torch.save({
                    'epoch': i,
                    'policy_state_dict': policy_module.state_dict(),
                    'value_state_dict': value_module.state_dict(),
                    'optimizer_state_dict': optim.state_dict(),
                    'eval_reward': best_eval_reward,
                    'training_args': vars(args),  # record this run's hyperparameters
                }, best_model_path)
                print(f"\n✅ Saved best model with eval reward: {best_eval_reward:.4f}")

            # Periodic checkpoint every 50 collection iterations (i.e. every
            # 5th evaluation, since evaluation runs every 10 iterations).
            if i % 50 == 0:
                checkpoint_path = os.path.join(args.save_dir, f"checkpoint_iter_{i}.pt")
                torch.save({
                    'epoch': i,
                    'policy_state_dict': policy_module.state_dict(),
                    'value_state_dict': value_module.state_dict(),
                    'optimizer_state_dict': optim.state_dict(),
                    'eval_reward': logs['eval reward (sum)'][-1],
                    'training_args': vars(args),
                }, checkpoint_path)
                print(f"\n💾 Saved checkpoint at iteration {i}")

            # free the evaluation rollout before resuming collection
            del eval_rollout

        # Refresh the progress-bar description after each evaluation.
        pbar.set_description(", ".join([eval_str, cum_reward_str, stepcount_str, lr_str]))
        pbar.refresh()

    # We're also using a learning rate scheduler. Like the gradient clipping,
    # this is a nice-to-have but nothing necessary for PPO to work.
    scheduler.step()

pbar.close()
print(f"\n🎉 Training completed! Best eval reward: {best_eval_reward:.4f}")


# ============ Test with the best saved model ============
print("\n" + "="*60)
print("Testing with best model...")
print("="*60)

# Reload the best checkpoint written during training.
# NOTE(review): torch.load raises if best_model_path was never written
# (possible when resuming with a restored best reward that is never beaten).
checkpoint = torch.load(best_model_path)
policy_module.load_state_dict(checkpoint['policy_state_dict'])
value_module.load_state_dict(checkpoint['value_state_dict'])
print(f"✅ Loaded best model from epoch {checkpoint['epoch']} with eval reward: {checkpoint['eval_reward']:.4f}")

# Run several deterministic evaluation episodes with the best policy.
num_test_episodes = 10
test_rewards = []
test_step_counts = []

with set_exploration_type(ExplorationType.DETERMINISTIC), torch.no_grad():
    for episode in range(num_test_episodes):
        test_rollout = env.rollout(1000, policy_module)
        episode_reward = test_rollout["next", "reward"].sum().item()
        episode_steps = test_rollout["step_count"].max().item()
        test_rewards.append(episode_reward)
        test_step_counts.append(episode_steps)
        print(f"Test Episode {episode+1}: Reward={episode_reward:.4f}, Steps={episode_steps}")
        # free rollout memory before the next episode
        del test_rollout

print(f"\n📊 Test Results (Best Model):")
print(f"  Average Reward: {sum(test_rewards)/len(test_rewards):.4f} ± {torch.tensor(test_rewards).std().item():.4f}")
print(f"  Average Steps: {sum(test_step_counts)/len(test_step_counts):.2f}")
print(f"  Max Reward: {max(test_rewards):.4f}")
print(f"  Min Reward: {min(test_rewards):.4f}")

# ============ Plot the training curves ============
plt.figure(figsize=(15, 10))

# 1. Training reward (batch average) per iteration
plt.subplot(2, 3, 1)
plt.plot(logs["reward"])
plt.xlabel("Iteration")
plt.ylabel("Average Reward")
plt.title("Training Rewards (Average per Batch)")
plt.grid(True)

# 2. Max step count reached during training
plt.subplot(2, 3, 2)
plt.plot(logs["step_count"])
plt.xlabel("Iteration")
plt.ylabel("Step Count")
plt.title("Max Step Count (Training)")
plt.grid(True)

# 3. Evaluation cumulative reward (only if at least one eval ran)
plt.subplot(2, 3, 3)
if logs["eval reward (sum)"]:
    plt.plot(logs["eval reward (sum)"])
    plt.xlabel("Evaluation Point")
    plt.ylabel("Cumulative Reward")
    plt.title("Evaluation Cumulative Reward")
    plt.axhline(y=best_eval_reward, color='r', linestyle='--', label=f'Best: {best_eval_reward:.2f}')
    plt.legend()
    plt.grid(True)

# 4. Max step count during evaluation
plt.subplot(2, 3, 4)
if logs["eval step_count"]:
    plt.plot(logs["eval step_count"])
    plt.xlabel("Evaluation Point")
    plt.ylabel("Step Count")
    plt.title("Max Step Count (Evaluation)")
    plt.grid(True)

# 5. Learning-rate schedule (cosine annealing)
plt.subplot(2, 3, 5)
plt.plot(logs["lr"])
plt.xlabel("Iteration")
plt.ylabel("Learning Rate")
plt.title("Learning Rate Schedule")
plt.grid(True)

# 6. Per-episode rewards from the best-model test
plt.subplot(2, 3, 6)
plt.bar(range(1, num_test_episodes+1), test_rewards)
plt.axhline(y=sum(test_rewards)/len(test_rewards), color='r', linestyle='--', label='Mean')
plt.xlabel("Test Episode")
plt.ylabel("Cumulative Reward")
plt.title("Best Model Test Performance")
plt.legend()
plt.grid(True)

plt.tight_layout()
plot_path = os.path.join(args.save_dir, "training_results.png")
plt.savefig(plot_path, dpi=150)
print(f"\n💾 Saved training plot to {plot_path}")
plt.show()

# Close the environment (releases the gRPC connection held by base_env).
env.close()
print("\n✅ Environment closed.")
print(f"\n{'='*60}")
print(f"Training Summary:")
print(f"  Best model saved to: {best_model_path}")
print(f"  Best eval reward: {best_eval_reward:.4f}")
print(f"  Training plots saved to: {plot_path}")
print(f"{'='*60}")