"""
PPO 训练器（基于 Lightning Fabric）

使用 Lightning Fabric 实现的 PPO 训练器，支持分布式训练和高效的检查点管理。
"""

from __future__ import annotations

import logging
from pathlib import Path
from typing import Optional

import numpy as np
import torch
import torch.optim as optim
from lightning.fabric import Fabric
from tqdm import tqdm
import wandb

from parnassus_train.criterions import PPOLoss
from parnassus_train.models import ActorCriticNetwork
from parnassus_train.tasks import ArmBalanceTask
from parnassus_train.utils import RolloutBuffer, compute_gae, normalize_advantages

logger = logging.getLogger(__name__)


class PPOTrainer:
    """
    PPO trainer built on Lightning Fabric.

    Supports:
    - distributed training (multi-GPU / multi-node)
    - mixed-precision training
    - automatic gradient clipping
    - top-k checkpoint management
    - Weights & Biases integration
    """

    def __init__(
        self,
        task: ArmBalanceTask,
        fabric: Optional[Fabric] = None,
    ) -> None:
        """
        Initialize the trainer.

        Args:
            task: Task configuration (env factory, network sizes, PPO
                hyperparameters, logging/checkpoint options).
            fabric: Lightning Fabric instance (optional; created automatically
                when omitted).
        """
        self.task = task

        # Set up Fabric (auto-detect accelerator and devices when not provided).
        if fabric is None:
            self.fabric = Fabric(
                accelerator="auto",
                devices="auto",
                precision="32-true",  # switch to "16-mixed" to enable mixed precision
            )
        else:
            self.fabric = fabric

        # Create the environment.
        self.env = task.create_env()

        # Create the actor-critic model.
        self.model = ActorCriticNetwork(
            obs_dim=task.obs_dim,
            action_dim=task.action_dim,
            hidden_dim=task.hidden_dim,
        )

        # Create the optimizer.
        self.optimizer = optim.Adam(self.model.parameters(), lr=task.learning_rate)

        # PPO clipped-surrogate loss.
        self.criterion = PPOLoss(
            clip_epsilon=task.clip_epsilon,
            value_coef=task.value_coef,
            entropy_coef=task.entropy_coef,
        )

        # Let Fabric wrap model and optimizer (device placement, precision,
        # distributed strategy).
        self.model, self.optimizer = self.fabric.setup(self.model, self.optimizer)

        # Rollout buffer (filled one episode at a time).
        self.buffer = RolloutBuffer()

        # Training statistics.
        self.all_returns = []
        self.all_lengths = []
        # (return, episode, checkpoint_path) tuples, kept sorted ASCENDING by
        # return — index 0 is always the worst retained checkpoint.
        self.top_checkpoints = []
        self.max_checkpoints_to_keep = 5

        logger.info(f"PPO Trainer initialized with task: {task.wandb_project}")
        logger.info(f"Model parameters: {sum(p.numel() for p in self.model.parameters()):,}")

    def select_action(self, obs: np.ndarray) -> tuple[np.ndarray, float, float]:
        """
        Sample an action from the current policy.

        Args:
            obs: Observation array of shape [obs_dim].

        Returns:
            action: Sampled action, shape [action_dim].
            value: State-value estimate for ``obs``.
            log_prob: Log-probability of the sampled action.
        """
        # as_tensor places the data directly on the training device, avoiding
        # the extra host-side copy of FloatTensor(...).to(device).
        obs_tensor = torch.as_tensor(
            obs, dtype=torch.float32, device=self.fabric.device
        ).unsqueeze(0)

        with torch.no_grad():
            action, log_prob, value = self.model.get_action(obs_tensor)

        return (
            action.squeeze(0).cpu().numpy(),
            value.squeeze(0).cpu().item(),
            log_prob.cpu().item(),
        )

    def collect_episode(self, episode: int) -> dict:
        """
        Roll out one complete episode with the current policy.

        Args:
            episode: Episode index (also used to seed the env reset, so runs
                are reproducible).

        Returns:
            episode_info: Dictionary of episode statistics (return, length,
                and joint-1 tracking metrics when the observation exposes them).
        """
        obs, _ = self.env.reset(seed=episode)
        self.buffer.clear()

        episode_return = 0.0
        episode_length = 0

        # Track joint 1 (the pendulum link). Assumes the observation layout is
        # (q0, q1, dq0, dq1, ...) — TODO confirm against the env definition.
        q1_values = []
        dq1_values = []
        angle_errors = []

        for step in range(self.task.max_episode_steps):
            # Sample an action.
            action, value, log_prob = self.select_action(obs)

            # Step the environment.
            next_obs, reward, terminated, truncated, info = self.env.step(action)
            done = terminated or truncated

            # Record joint-1 state. Index instead of tuple-unpacking so that
            # observations LONGER than 4 elements (which pass the >= 4 guard)
            # do not raise ValueError.
            if len(next_obs) >= 4:
                q1, dq1 = next_obs[1], next_obs[3]
                q1_values.append(q1)
                dq1_values.append(dq1)
                # Wrap the angle into (-pi, pi] so the error measures distance
                # from upright regardless of accumulated full rotations.
                q1_normalized = np.arctan2(np.sin(q1), np.cos(q1))
                angle_errors.append(abs(q1_normalized))

            # Store the transition.
            self.buffer.add(obs, action, reward, value, log_prob, done)

            episode_return += reward
            episode_length += 1

            obs = next_obs

            if done:
                break

        # Core episode statistics.
        episode_info = {
            "episode_return": episode_return,
            "episode_length": episode_length,
        }

        # Joint-1 summary statistics (only when data was collected).
        if q1_values:
            episode_info.update({
                "q1_mean": np.mean(q1_values),
                "q1_std": np.std(q1_values),
                "q1_abs_mean": np.mean(np.abs(q1_values)),
                "q1_final": q1_values[-1],
                "dq1_mean": np.mean(dq1_values),
                "dq1_std": np.std(dq1_values),
                "dq1_abs_mean": np.mean(np.abs(dq1_values)),
                "dq1_final": dq1_values[-1],
                "angle_error_mean": np.mean(angle_errors),
                "angle_error_max": np.max(angle_errors),
                "angle_error_final": angle_errors[-1],
            })

        return episode_info

    def update_policy(self) -> dict:
        """
        Run the PPO update on the current rollout buffer.

        Returns:
            metrics: Loss components and other metrics, averaged over all
                mini-batch updates (zeros when the buffer is empty).
        """
        if self.buffer.is_empty():
            return {
                "loss": 0.0,
                "policy_loss": 0.0,
                "value_loss": 0.0,
                "entropy_loss": 0.0,
            }

        # Pull the rollout data.
        observations, actions, rewards, values, old_log_probs, dones = self.buffer.get()

        # Move everything to the training device.
        observations = observations.to(self.fabric.device)
        actions = actions.to(self.fabric.device)
        rewards = rewards.to(self.fabric.device)
        values = values.to(self.fabric.device)
        old_log_probs = old_log_probs.to(self.fabric.device)
        dones = dones.to(self.fabric.device)

        # Compute GAE advantages and return targets.
        with torch.no_grad():
            # Bootstrap from the last state's value if the episode did not end.
            # NOTE(review): `done` folds terminated and truncated together, so
            # time-limit truncations are NOT bootstrapped either — this slightly
            # underestimates returns for truncated episodes; confirm this is
            # intended given compute_gae's contract.
            if dones[-1] == 0:
                last_value = self.model.get_value(observations[-1:])
                next_value = last_value.item()
            else:
                next_value = 0.0

            advantages, returns = compute_gae(
                rewards, values, dones,
                gamma=self.task.gamma,
                gae_lambda=self.task.gae_lambda,
                next_value=next_value,
            )

            advantages = advantages.to(self.fabric.device)
            returns = returns.to(self.fabric.device)

            # Standardize advantages for a better-conditioned policy gradient.
            advantages = normalize_advantages(advantages)

        # Multiple epochs of mini-batch updates over the same rollout.
        total_metrics = {}
        n_updates = 0

        for epoch in range(self.task.n_epochs):
            # Fresh random permutation each epoch for mini-batch sampling.
            indices = torch.randperm(len(self.buffer), device=self.fabric.device)

            for start_idx in range(0, len(self.buffer), self.task.batch_size):
                end_idx = min(start_idx + self.task.batch_size, len(self.buffer))
                batch_indices = indices[start_idx:end_idx]

                # Slice out the mini-batch.
                batch_obs = observations[batch_indices]
                batch_actions = actions[batch_indices]
                batch_old_log_probs = old_log_probs[batch_indices]
                batch_advantages = advantages[batch_indices]
                batch_returns = returns[batch_indices]

                # Re-evaluate the stored actions under the current policy.
                log_probs, entropy, pred_values = self.model.evaluate_actions(
                    batch_obs, batch_actions
                )

                # Clipped-surrogate loss plus value and entropy terms.
                loss, loss_dict = self.criterion(
                    log_probs=log_probs,
                    old_log_probs=batch_old_log_probs,
                    advantages=batch_advantages,
                    values=pred_values.squeeze(-1),
                    returns=batch_returns,
                    entropy=entropy,
                )

                # Backward pass (Fabric handles precision scaling / DDP sync).
                self.fabric.backward(loss)

                # Clip gradients before the optimizer step.
                self.fabric.clip_gradients(
                    self.model,
                    self.optimizer,
                    max_norm=self.task.max_grad_norm,
                )

                self.optimizer.step()
                self.optimizer.zero_grad()

                # Accumulate per-batch metrics for averaging.
                for key, value in loss_dict.items():
                    if key not in total_metrics:
                        total_metrics[key] = 0.0
                    total_metrics[key] += value.item()
                n_updates += 1

        # The rollout is now off-policy; discard it.
        self.buffer.clear()

        # Average over all mini-batch updates (n_updates >= 1 here, since the
        # buffer was non-empty).
        metrics = {key: value / n_updates for key, value in total_metrics.items()}
        return metrics

    def save_checkpoint(self, episode: int, episode_return: float) -> None:
        """
        Save a checkpoint, retaining only the top-k by episode return.

        Args:
            episode: Episode index.
            episode_return: Return achieved by this episode.
        """
        # Save only if there is room, or this run beats the worst retained
        # checkpoint (index 0 of the ascending-sorted list).
        should_save = False
        if len(self.top_checkpoints) < self.max_checkpoints_to_keep:
            should_save = True
        elif episode_return > self.top_checkpoints[0][0]:
            should_save = True

        if not should_save:
            return

        # Ensure the checkpoint directory exists.
        checkpoint_dir = Path(self.task.save_dir)
        checkpoint_dir.mkdir(parents=True, exist_ok=True)

        # Build the checkpoint file name.
        checkpoint_filename = f"ppo_episode_{episode:04d}_return_{episode_return:.2f}.pt"
        checkpoint_path = checkpoint_dir / checkpoint_filename

        # Persist model, optimizer, and training history.
        checkpoint = {
            "episode": episode,
            "model_state_dict": self.model.state_dict(),
            "optimizer_state_dict": self.optimizer.state_dict(),
            "episode_return": episode_return,
            "all_returns": self.all_returns,
            "all_lengths": self.all_lengths,
            "task_config": self.task.to_dict(),
        }

        self.fabric.save(checkpoint_path, checkpoint)

        # Insert into the top-k list (kept sorted ascending by return).
        self.top_checkpoints.append((episode_return, episode, checkpoint_path))
        self.top_checkpoints.sort(key=lambda x: x[0])

        # Evict the worst checkpoint if over capacity. The entry just added
        # can never be the one evicted: when the list was full, should_save
        # required a strictly greater return than the previous minimum.
        if len(self.top_checkpoints) > self.max_checkpoints_to_keep:
            worst_return, worst_episode, worst_path = self.top_checkpoints.pop(0)
            if worst_path.exists():
                worst_path.unlink()
                logger.info(
                    f"Removed checkpoint: {worst_path.name} (return: {worst_return:.2f})"
                )

        # Periodic logging.
        if episode % self.task.save_interval == 0:
            # Rank 1 = best. Locate the just-saved entry by its (unique) path
            # rather than float-equality on the return — duplicate returns
            # would otherwise match the wrong entry — and count from the top
            # end of the ascending list.
            idx = next(
                i for i, (_, _, p) in enumerate(self.top_checkpoints)
                if p == checkpoint_path
            )
            rank = len(self.top_checkpoints) - idx
            logger.info(
                f"Checkpoint saved: {checkpoint_path.name} "
                f"(return: {episode_return:.2f}, rank: {rank}/{self.max_checkpoints_to_keep})"
            )

    def train(self) -> None:
        """Run the main training loop."""
        logger.info("=" * 60)
        logger.info("Starting PPO Training")
        logger.info(f"Server address: {self.task.server_address}")
        logger.info(f"Number of episodes: {self.task.num_episodes}")
        logger.info("=" * 60)

        # Initialize Weights & Biases on the global-zero rank only.
        if self.task.use_wandb and self.fabric.is_global_zero:
            wandb.init(
                project=self.task.wandb_project,
                name=self.task.wandb_name,
                config=self.task.to_dict(),
            )
            wandb.watch(self.model, log="all", log_freq=100)
            logger.info(f"Weights & Biases initialized: {wandb.run.name}")

        # Training loop; progress bar shown on rank zero only.
        pbar = tqdm(
            range(1, self.task.num_episodes + 1),
            desc="Training episodes",
            disable=not self.fabric.is_global_zero,
        )

        for episode in pbar:
            # Collect one episode of experience.
            episode_info = self.collect_episode(episode)

            # PPO update on the collected rollout.
            metrics = self.update_policy()

            # Record statistics.
            self.all_returns.append(episode_info["episode_return"])
            self.all_lengths.append(episode_info["episode_length"])

            # Moving averages over the last log_interval episodes.
            recent_returns = self.all_returns[-self.task.log_interval:]
            recent_lengths = self.all_lengths[-self.task.log_interval:]
            avg_return = np.mean(recent_returns)
            avg_length = np.mean(recent_lengths)

            # Update the progress bar.
            if self.fabric.is_global_zero:
                pbar.set_postfix({
                    "Return": f"{episode_info['episode_return']:.2f}",
                    "AvgReturn": f"{avg_return:.2f}",
                    "Length": episode_info["episode_length"],
                    "Loss": f'{metrics.get("loss", 0):.4f}',
                })

            # Log to W&B.
            if self.task.use_wandb and self.fabric.is_global_zero:
                log_dict = {
                    "episode": episode,
                    "avg_return": avg_return,
                    "avg_length": avg_length,
                    **episode_info,
                    **metrics,
                }
                wandb.log(log_dict)

            # Periodic console logging.
            if episode % self.task.log_interval == 0 and self.fabric.is_global_zero:
                logger.info(
                    f"Episode {episode:4d} | "
                    f"Return: {episode_info['episode_return']:8.2f} | "
                    f"Avg Return: {avg_return:8.2f} | "
                    f"Length: {episode_info['episode_length']:4d} | "
                    f"Loss: {metrics.get('loss', 0):8.4f}"
                )

            # Checkpointing (rank zero only).
            if self.fabric.is_global_zero:
                self.save_checkpoint(episode, episode_info["episode_return"])

        # Training finished; release resources.
        pbar.close()
        self.env.close()

        if self.fabric.is_global_zero:
            logger.info("=" * 60)
            logger.info("Training completed")
            logger.info(f"Total episodes: {len(self.all_returns)}")
            if len(self.all_returns) > 0:
                logger.info(f"Final average return: {np.mean(self.all_returns[-100:]):.2f}")

            # Report the retained top checkpoints, best first.
            if self.top_checkpoints:
                logger.info("\nTop 5 checkpoints by episode return:")
                for idx, (ret, ep, path) in enumerate(reversed(self.top_checkpoints), 1):
                    logger.info(f"  {idx}. Episode {ep:4d}: {ret:8.2f} - {path.name}")

            logger.info("=" * 60)

            # Shut down W&B.
            if self.task.use_wandb:
                wandb.finish()
