#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
层级RWKV7模型使用示例
展示如何使用层级模型进行训练和推理
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from typing import Dict, List, Tuple
import time

from hierarchical_rwkv7 import (
    HierarchicalRwkvConfig,
    HierarchicalRwkvForCausalLM,
    DeepSupervisionTrainer
)


def create_sample_data(vocab_size: int = 1000, num_samples: int = 100, seq_len: int = 64,
                       batch_size: int = 4) -> DataLoader:
    """Build a toy language-modeling dataset of random token ids.

    Args:
        vocab_size: number of distinct token ids to sample from.
        num_samples: number of sequences in the dataset.
        seq_len: length of each sequence.
        batch_size: batch size of the returned DataLoader (default 4,
            matching the previously hard-coded value).

    Returns:
        A shuffling DataLoader yielding (input_ids, labels) batches, where
        labels are input_ids shifted left by one position and the final
        position is set to -100 so the loss ignores it.
    """
    # Random "text": uniform token ids in [0, vocab_size).
    input_ids = torch.randint(0, vocab_size, (num_samples, seq_len))
    labels = input_ids.clone()

    # Shift labels left by one (standard next-token prediction target);
    # the last position has no target, so mark it as ignored (-100).
    labels[:, :-1] = input_ids[:, 1:]
    labels[:, -1] = -100

    dataset = TensorDataset(input_ids, labels)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)


def example_basic_usage():
    """Basic usage demo: build a config and model, run one forward pass.

    Returns:
        (model, config) — the freshly constructed model and its configuration.
    """
    print("=" * 60)
    print("层级RWKV7模型基础使用示例")
    print("=" * 60)

    # 1. Model configuration.
    config = HierarchicalRwkvConfig(
        vocab_size=1000,
        hidden_size=256,
        num_hidden_layers=11,
        low_level_layers=4,   # layers 0-3 act as low-level thinking layers
        high_level_layers=7,  # layers 4-10 act as high-level thinking layers
        N_iterations=2,       # number of hierarchical iterations
        T_interval=2,         # high-level update interval
        N_supervision=3,      # deep-supervision steps
        use_hierarchical_state=True,
        state_fusion_method="gate",
        context_length=512
    )

    print("模型配置:")
    print(f"  - 词汇表大小: {config.vocab_size}")
    print(f"  - 隐藏层大小: {config.hidden_size}")
    print(f"  - Low level层数: {config.low_level_layers}")
    print(f"  - High level层数: {config.high_level_layers}")
    print(f"  - 层级迭代次数: {config.N_iterations}")
    print(f"  - 更新间隔: {config.T_interval}")
    print(f"  - Deep Supervision步数: {config.N_supervision}")

    # 2. Instantiate the model and report its parameter count.
    model = HierarchicalRwkvForCausalLM(config)
    n_params = sum(p.numel() for p in model.parameters())
    print(f"\n模型参数量: {n_params:,}")

    # 3. A tiny random batch with next-token (left-shifted) labels.
    batch_size, seq_len = 2, 32
    input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_len))
    labels = input_ids.clone()
    labels[:, :-1] = input_ids[:, 1:]
    labels[:, -1] = -100  # final position has no target

    print(f"\n输入数据形状: {input_ids.shape}")

    # 4. Time a single forward pass.
    print("\n执行前向传播...")
    t0 = time.time()
    outputs = model(input_ids=input_ids, labels=labels, return_dict=True)
    elapsed = time.time() - t0

    print("前向传播结果:")
    print(f"  - 输出logits形状: {outputs.logits.shape}")
    print(f"  - High level状态形状: {outputs.high_level_state.shape}")
    print(f"  - Low level状态形状: {outputs.low_level_state.shape}")
    print(f"  - 损失值: {outputs.loss.item():.4f}")
    print(f"  - 前向传播时间: {elapsed:.4f}秒")

    return model, config


def example_deep_supervision_training():
    """Train the model for a few epochs using the DeepSupervisionTrainer.

    Returns:
        The trained model.
    """
    print("\n" + "=" * 60)
    print("Deep Supervision训练示例")
    print("=" * 60)

    # 1. Model + trainer.
    config = HierarchicalRwkvConfig(
        vocab_size=1000,
        hidden_size=128,
        num_hidden_layers=11,
        low_level_layers=4,
        high_level_layers=7,
        N_iterations=2,
        T_interval=2,
        N_supervision=3,
        use_hierarchical_state=True,
        state_fusion_method="gate"
    )
    model = HierarchicalRwkvForCausalLM(config)
    trainer = DeepSupervisionTrainer(model, config)

    # 2. Optimizer.
    optimizer = optim.AdamW(model.parameters(), lr=1e-4)

    # 3. Toy training data.
    dataloader = create_sample_data(vocab_size=config.vocab_size, num_samples=50, seq_len=32)

    print("训练配置:")
    print(f"  - 数据集大小: {len(dataloader.dataset)}")
    print(f"  - 批次大小: {dataloader.batch_size}")
    print(f"  - Deep Supervision步数: {config.N_supervision}")

    # 4. Training loop.
    model.train()
    num_epochs = 3
    print(f"\n开始训练 {num_epochs} 个epoch...")

    for epoch in range(num_epochs):
        losses = []
        durations = []

        for batch_idx, (input_ids, labels) in enumerate(dataloader):
            t0 = time.time()
            # One deep-supervision optimization step on this batch.
            stats = trainer.train_step(input_ids, labels, optimizer)
            elapsed = time.time() - t0

            losses.append(stats['avg_loss'])
            durations.append(elapsed)

            if batch_idx % 5 == 0:
                print(f"Epoch {epoch+1}/{num_epochs}, Batch {batch_idx+1}/{len(dataloader)}: "
                      f"Loss={stats['avg_loss']:.4f}, Time={elapsed:.4f}s")

        print(f"Epoch {epoch+1} 完成: 平均损失={np.mean(losses):.4f}, 平均时间={np.mean(durations):.4f}s")

    print("训练完成！")

    return model


def example_inference():
    """Greedy token-by-token generation demo.

    Returns:
        The generated id tensor, prompt included.
    """
    print("\n" + "=" * 60)
    print("推理示例")
    print("=" * 60)

    # 1. Build a model (deep supervision is a training-only feature).
    config = HierarchicalRwkvConfig(
        vocab_size=1000,
        hidden_size=128,
        num_hidden_layers=11,
        low_level_layers=4,
        high_level_layers=7,
        N_iterations=2,
        T_interval=2,
        N_supervision=1,  # no deep supervision needed at inference time
        use_hierarchical_state=True,
        state_fusion_method="gate"
    )
    model = HierarchicalRwkvForCausalLM(config)
    model.eval()

    # 2. Generate text one token at a time.
    print("开始文本生成...")

    prompt = torch.tensor([[1, 2, 3, 4, 5]])  # example prompt token ids
    max_length = 20
    print(f"初始输入: {prompt.tolist()}")

    generated_ids = prompt.clone()
    num_new_tokens = max_length - prompt.size(1)

    with torch.no_grad():
        for step in range(num_new_tokens):
            # Re-run the model on the full sequence so far.
            outputs = model(input_ids=generated_ids, return_dict=True)

            # Greedy decoding: argmax over the last position's logits.
            next_token = outputs.logits[:, -1, :].argmax(dim=-1, keepdim=True)

            # Append the chosen token to the running sequence.
            generated_ids = torch.cat([generated_ids, next_token], dim=1)

            print(f"步骤 {step+1}: 生成token {next_token.item()}")

    print(f"完整生成序列: {generated_ids.tolist()}")

    return generated_ids


def example_state_analysis():
    """Inspect the hierarchical states produced by one forward pass.

    Returns:
        (zH, zL) — the high- and low-level state tensors from the model output.
    """
    print("\n" + "=" * 60)
    print("层级状态分析示例")
    print("=" * 60)

    # 1. Model in eval mode.
    config = HierarchicalRwkvConfig(
        vocab_size=1000,
        hidden_size=128,
        num_hidden_layers=11,
        low_level_layers=4,
        high_level_layers=7,
        N_iterations=2,
        T_interval=2,
        N_supervision=1,
        use_hierarchical_state=True,
        state_fusion_method="gate"
    )
    model = HierarchicalRwkvForCausalLM(config)
    model.eval()

    # 2. One random sequence with left-shifted labels.
    input_ids = torch.randint(0, config.vocab_size, (1, 16))
    labels = input_ids.clone()
    labels[:, :-1] = input_ids[:, 1:]
    labels[:, -1] = -100

    print(f"输入序列长度: {input_ids.size(1)}")

    # 3. Forward pass without gradients to collect the hierarchical states.
    with torch.no_grad():
        outputs = model(input_ids=input_ids, labels=labels, return_dict=True)

    zH = outputs.high_level_state
    zL = outputs.low_level_state

    print(f"\n层级状态分析:")
    print(f"  - High level状态形状: {zH.shape}")
    print(f"  - Low level状态形状: {zL.shape}")

    # Basic per-state statistics (mean feature norm, global std).
    print(f"  - High level状态范数: {torch.norm(zH, dim=-1).mean().item():.4f}")
    print(f"  - Low level状态范数: {torch.norm(zL, dim=-1).mean().item():.4f}")
    print(f"  - High level状态标准差: {torch.std(zH).item():.4f}")
    print(f"  - Low level状态标准差: {torch.std(zL).item():.4f}")

    # Correlation between the two levels' mean feature vectors.
    zH_mean = zH.view(-1, zH.size(-1)).mean(dim=0)
    zL_mean = zL.view(-1, zL.size(-1)).mean(dim=0)
    correlation = torch.corrcoef(torch.stack([zH_mean, zL_mean]))[0, 1].item()
    print(f"  - 层级间相关性: {correlation:.4f}")

    # Average step-to-step change along the sequence dimension.
    print(f"  - High level状态变化: {torch.norm(zH[:, 1:] - zH[:, :-1], dim=-1).mean().item():.4f}")
    print(f"  - Low level状态变化: {torch.norm(zL[:, 1:] - zL[:, :-1], dim=-1).mean().item():.4f}")

    return zH, zL


def example_different_fusion_methods():
    """Compare the available state-fusion strategies on fresh random inputs.

    Returns:
        dict mapping fusion-method name -> {"loss", "time", "zH_norm", "zL_norm"}.
    """
    print("\n" + "=" * 60)
    print("不同状态融合方法示例")
    print("=" * 60)

    results = {}

    for method in ("add", "concat", "gate"):
        print(f"\n测试融合方法: {method}")

        # A fresh model per fusion method; everything else identical.
        config = HierarchicalRwkvConfig(
            vocab_size=1000,
            hidden_size=128,
            num_hidden_layers=11,
            low_level_layers=4,
            high_level_layers=7,
            N_iterations=2,
            T_interval=2,
            N_supervision=1,
            use_hierarchical_state=True,
            state_fusion_method=method
        )
        model = HierarchicalRwkvForCausalLM(config)
        model.eval()

        # Random batch with left-shifted labels.
        input_ids = torch.randint(0, config.vocab_size, (2, 16))
        labels = input_ids.clone()
        labels[:, :-1] = input_ids[:, 1:]
        labels[:, -1] = -100

        # Timed forward pass without gradients.
        t0 = time.time()
        with torch.no_grad():
            outputs = model(input_ids=input_ids, labels=labels, return_dict=True)
        elapsed = time.time() - t0

        entry = {
            "loss": outputs.loss.item(),
            "time": elapsed,
            "zH_norm": torch.norm(outputs.high_level_state).item(),
            "zL_norm": torch.norm(outputs.low_level_state).item()
        }
        results[method] = entry

        print(f"  - 损失值: {outputs.loss.item():.4f}")
        print(f"  - 前向传播时间: {elapsed:.4f}秒")
        print(f"  - High level状态范数: {entry['zH_norm']:.4f}")
        print(f"  - Low level状态范数: {entry['zL_norm']:.4f}")

    # Summary table across all fusion methods.
    print(f"\n融合方法比较:")
    print(f"{'方法':<8} {'损失':<8} {'时间(秒)':<10} {'zH范数':<8} {'zL范数':<8}")
    print("-" * 50)
    for method, entry in results.items():
        print(f"{method:<8} {entry['loss']:<8.4f} {entry['time']:<10.4f} "
              f"{entry['zH_norm']:<8.4f} {entry['zL_norm']:<8.4f}")

    return results


def main():
    """Run every example in sequence under a fixed random seed."""
    print("层级RWKV7模型使用示例")
    print("=" * 60)

    # Fixed seeds so the demo output is reproducible.
    torch.manual_seed(42)
    np.random.seed(42)

    try:
        model, config = example_basic_usage()                 # 1. basics
        trained_model = example_deep_supervision_training()   # 2. training
        generated_sequence = example_inference()              # 3. generation
        zH, zL = example_state_analysis()                     # 4. state analysis
        fusion_results = example_different_fusion_methods()   # 5. fusion methods

        print("\n" + "=" * 60)
        print("🎉 所有示例运行完成！")
        print("=" * 60)

        print("\n使用总结:")
        print("✓ 层级RWKV7模型支持Low/High level思考层")
        print("✓ 支持HRM层级模型结构")
        print("✓ 支持Deep Supervision训练")
        print("✓ 支持多种状态融合方法")
        print("✓ 支持文本生成和状态分析")
        print("✓ 模型性能良好，易于使用")

    except Exception as e:
        # Demo script: report any failure with a traceback instead of crashing.
        print(f"❌ 示例运行失败: {str(e)}")
        import traceback
        traceback.print_exc()


# Run the full demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
