#!/usr/bin/env python3
"""
模块使用示例

演示如何使用重构后的各个模块。
"""

from __future__ import annotations

import torch
import numpy as np

# Example 1: using the environment module
def example_env():
    """Demonstrate how to use the Gymnasium environment.

    Connects to a gRPC environment server, resets the env, and takes a
    single random step. Requires the server to be running.
    """
    banner = "=" * 60
    print(banner)
    print("示例 1: 使用 Gymnasium 环境")
    print(banner)

    from parnassus_train.envs import GrpcArmEnv

    # Build the gRPC-backed arm-balance environment.
    env = GrpcArmEnv(
        server_address="localhost:50051",
        obs_dim=4,
        action_dim=1,
        max_episode_steps=500,
        reward_shaping=True,
    )

    print(f"观测空间: {env.observation_space}")
    print(f"动作空间: {env.action_space}")

    try:
        # Reset with a fixed seed for reproducibility.
        observation, info = env.reset(seed=42)
        print(f"初始观测: {observation}")
        print(f"附加信息: {info}")

        # Sample a random action and take one step.
        sampled_action = env.action_space.sample()
        print(f"随机动作: {sampled_action}")

        next_observation, reward, terminated, truncated, info = env.step(sampled_action)
        print(f"下一个观测: {next_observation}")
        print(f"奖励: {reward:.4f}")
        print(f"终止: {terminated}, 截断: {truncated}")

    except Exception as e:
        # The gRPC server may not be running; report instead of crashing the demo.
        print(f"环境连接失败（需要先启动 gRPC 服务器）: {e}")
    finally:
        env.close()

    print()


# Example 2: using the model module
def example_model():
    """Demonstrate how to use the Actor-Critic network.

    Covers construction, a forward pass, single-action sampling, and
    batched action evaluation on random tensors.
    """
    print("=" * 60)
    print("示例 2: 使用 Actor-Critic 网络")
    print("=" * 60)

    from parnassus_train.models import ActorCriticNetwork

    # Create the model
    model = ActorCriticNetwork(
        obs_dim=4,
        action_dim=1,
        hidden_dim=64,
    )

    print(f"模型参数量: {sum(p.numel() for p in model.parameters()):,}")
    print(f"模型结构:\n{model}")

    # Forward pass — call the module itself rather than model.forward() so
    # that any registered forward/pre-forward hooks are honored (calling
    # .forward() directly bypasses nn.Module.__call__).
    obs = torch.randn(1, 4)
    print(f"\n输入观测: {obs}")

    mean, std, value = model(obs)
    print(f"动作均值: {mean}")
    print(f"动作标准差: {std}")
    print(f"状态价值: {value}")

    # Sample an action (with its log-probability and state value).
    action, log_prob, value = model.get_action(obs)
    print(f"\n采样动作: {action}")
    print(f"对数概率: {log_prob}")
    print(f"状态价值: {value}")

    # Evaluate a batch of actions against a batch of observations.
    actions = torch.randn(5, 1)
    obs_batch = torch.randn(5, 4)
    log_probs, entropy, values = model.evaluate_actions(obs_batch, actions)
    print(f"\n批量评估:")
    print(f"对数概率: {log_probs}")
    print(f"熵: {entropy}")
    print(f"价值: {values.squeeze()}")

    print()

# Example 3: using the loss-function module
def example_criterion():
    """Demonstrate how to use the PPO loss function.

    Builds a ``PPOLoss`` with standard hyper-parameters and evaluates it
    on a batch of randomly generated statistics.
    """
    sep = "=" * 60
    print(sep)
    print("示例 3: 使用 PPO 损失函数")
    print(sep)

    from parnassus_train.criterions import PPOLoss

    # Instantiate the loss with typical PPO coefficients.
    criterion = PPOLoss(
        clip_epsilon=0.2,
        value_coef=0.5,
        entropy_coef=0.01,
    )

    print(f"损失函数配置: {criterion}")

    # Fabricate a batch of dummy statistics to feed the loss.
    # NOTE: tensors are created in the same order as before so the RNG
    # stream is unchanged.
    n = 64
    batch = {
        "log_probs": torch.randn(n),
        "old_log_probs": torch.randn(n),
        "advantages": torch.randn(n),
        "values": torch.randn(n),
        "returns": torch.randn(n),
        "entropy": torch.rand(n),
    }

    # Compute the combined loss and its per-term breakdown.
    loss, loss_dict = criterion(**batch)

    print(f"\n总损失: {loss.item():.4f}")
    print(f"策略损失: {loss_dict['policy_loss'].item():.4f}")
    print(f"价值损失: {loss_dict['value_loss'].item():.4f}")
    print(f"熵损失: {loss_dict['entropy_loss'].item():.4f}")
    print(f"比率均值: {loss_dict['ratio_mean'].item():.4f}")
    print(f"近似KL: {loss_dict['approx_kl'].item():.4f}")

    print()

# Example 4: using the utility-function module
def example_utils():
    """Demonstrate how to use the utility functions.

    Walks through the rollout buffer, GAE computation, and advantage
    normalization with random data.
    """
    sep = "=" * 60
    print(sep)
    print("示例 4: 使用工具函数")
    print(sep)

    from parnassus_train.utils import compute_gae, normalize_advantages, RolloutBuffer

    # 1. Rollout buffer
    print("1. RolloutBuffer 示例:")
    buffer = RolloutBuffer()

    # Push ten random transitions; arguments are evaluated left-to-right,
    # preserving the original RNG call order.
    for step in range(10):
        buffer.add(
            np.random.randn(4),   # observation
            np.random.randn(1),   # action
            np.random.randn(),    # reward
            np.random.randn(),    # value estimate
            np.random.randn(),    # log-probability
            step == 9,            # only the final transition is terminal
        )

    print(f"缓冲区长度: {len(buffer)}")
    print(f"缓冲区是否为空: {buffer.is_empty()}")

    # Pull everything back out as arrays.
    observations, actions, rewards, values, log_probs, dones = buffer.get()
    print(f"观测形状: {observations.shape}")
    print(f"动作形状: {actions.shape}")
    print(f"奖励形状: {rewards.shape}")

    # 2. Generalized Advantage Estimation
    print("\n2. GAE 计算示例:")
    advantages, returns = compute_gae(
        rewards=rewards,
        values=values,
        dones=dones,
        gamma=0.99,
        gae_lambda=0.95,
        next_value=0.0,
    )

    print(f"优势形状: {advantages.shape}")
    print(f"回报形状: {returns.shape}")
    print(f"优势均值: {advantages.mean():.4f}")
    print(f"优势标准差: {advantages.std():.4f}")

    # 3. Advantage normalization
    print("\n3. 标准化优势示例:")
    normalized_advantages = normalize_advantages(advantages)
    print(f"标准化前均值: {advantages.mean():.4f}")
    print(f"标准化后均值: {normalized_advantages.mean():.4f}")
    print(f"标准化前标准差: {advantages.std():.4f}")
    print(f"标准化后标准差: {normalized_advantages.std():.4f}")

    print()

# Example 5: using the task-configuration module
def example_task():
    """Demonstrate how to use the task configuration.

    Builds an ``ArmBalanceTask``, prints its key hyper-parameters, and
    attempts to create the associated environment (requires a running
    gRPC server).
    """
    print("=" * 60)
    print("示例 5: 使用任务配置")
    print("=" * 60)

    from parnassus_train.tasks import ArmBalanceTask

    # Create the task configuration
    task = ArmBalanceTask(
        server_address="localhost:50051",
        num_episodes=1000,
        learning_rate=3e-4,
        gamma=0.99,
        use_wandb=False,
    )

    print(f"任务配置:")
    print(f"  服务器地址: {task.server_address}")
    print(f"  训练轮数: {task.num_episodes}")
    print(f"  学习率: {task.learning_rate}")
    print(f"  折扣因子: {task.gamma}")
    print(f"  PPO 裁剪参数: {task.clip_epsilon}")

    # Convert to a plain dict (e.g. for logging / serialization).
    config_dict = task.to_dict()
    print(f"\n配置字典键: {list(config_dict.keys())}")

    # Try to create the environment. Close it in `finally` so the gRPC
    # channel is released even if one of the prints raises — previously
    # close() was only reached when every print succeeded.
    env = None
    try:
        env = task.create_env()
        print(f"\n环境创建成功!")
        print(f"  观测空间: {env.observation_space}")
        print(f"  动作空间: {env.action_space}")
    except Exception as e:
        print(f"\n环境创建失败（需要先启动 gRPC 服务器）: {e}")
    finally:
        if env is not None:
            env.close()

    print()

# Example 6: the complete training workflow (not actually executed)
def example_training_workflow():
    """Demonstrate the full training workflow.

    Prints the intended end-to-end usage as pseudo-code; nothing here is
    executed.
    """
    sep = "=" * 60
    print(sep)
    print("示例 6: 完整的训练工作流程（伪代码）")
    print(sep)

    # The snippet below is display-only text, reproduced verbatim.
    code = """
# 1. 创建任务配置
from parnassus_train.tasks import ArmBalanceTask

task = ArmBalanceTask(
    server_address="localhost:50051",
    num_episodes=10000,
    learning_rate=3e-4,
    gamma=0.99,
    use_wandb=True,
    wandb_project="my-project",
)

# 2. 创建训练器
from parnassus_train.trainers import PPOTrainer

trainer = PPOTrainer(task=task)

# 3. 开始训练
trainer.train()

# 训练器会自动处理:
# - 环境创建和管理
# - 模型初始化
# - 数据收集
# - 策略更新
# - 检查点保存
# - W&B 日志记录
"""

    print(code)
    print()

def main():
    """Run every example in sequence."""
    print("\n" + "=" * 60)
    print("Parnassus Train 模块化架构示例")
    print("=" * 60 + "\n")

    # Run the examples in order.
    # example_env()  # requires a running gRPC server
    demos = (
        example_model,
        example_criterion,
        example_utils,
        example_task,
        example_training_workflow,
    )
    for demo in demos:
        demo()

    print("=" * 60)
    print("所有示例运行完成！")
    print("=" * 60)
    print("\n提示:")
    print("1. 要测试环境模块，请先启动 gRPC 环境服务器")
    print("2. 要进行实际训练，请运行: python tests/test_train/test_train_ppo_modular.py")
    print("3. 查看 MODULAR_ARCHITECTURE.md 了解更多详情")
    print()

# Script entry point: run all demos when executed directly.
if __name__ == "__main__":
    main()
