#!/usr/bin/env python3
"""
Triton优化训练脚本
使用自定义Triton kernels替换关键算子
"""

import os
import sys
import json
import time
import argparse
from pathlib import Path
from typing import Dict, Any, Optional

import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.data import DataLoader, DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP

from transformers import (
    GPT2LMHeadModel,
    GPT2Config,
    GPT2Tokenizer,
    get_linear_schedule_with_warmup,
    set_seed
)
from datasets import load_dataset, load_from_disk
import numpy as np
from tqdm import tqdm

# 添加项目根目录到路径（triton_kernels在根目录）
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
sys.path.insert(0, project_root)
from triton_kernels.attention import TritonGPT2Attention
from triton_kernels.layer_norm import TritonLayerNorm
from triton_kernels.mlp import TritonGPT2MLP

# 复用PyTorch脚本的PerformanceMonitor（同目录）
sys.path.insert(0, os.path.dirname(__file__))
from train_pytorch import PerformanceMonitor, setup_distributed, load_config, prepare_dataset


def replace_modules_with_triton(model, config: Dict[str, Any]):
    """
    Swap standard GPT-2 submodules for their Triton-optimized equivalents.

    Replaces attention blocks, LayerNorms and MLPs in place, copying (and,
    where the layouts differ, transposing) the pretrained weights into the
    new modules. Which module types are replaced is controlled by the
    "triton" section of *config*.

    Args:
        model: A HuggingFace GPT-2 style model (must expose ``.config`` and
            ``get_submodule``).
        config: Full training config dict; reads the "triton" sub-dict.

    Returns:
        The same ``model`` object, mutated in place.
    """
    triton_config = config.get("triton", {})
    model_config = model.config  # HF model config, used to build the Triton modules

    if not triton_config.get("use_triton_kernels", True):
        print("Triton kernels disabled in config")
        return model

    num_attention_replaced = 0
    num_layernorm_replaced = 0
    num_mlp_replaced = 0

    # Materialize the module list up front: named_modules() is a generator
    # over the live module tree, so calling setattr() on a parent while
    # iterating would also walk the freshly inserted Triton modules (and any
    # LayerNorm/Linear children they contain), risking double replacement.
    for name, module in list(model.named_modules()):
        # --- Attention ---
        if triton_config.get("enable_attention", True) and "attn" in name and hasattr(module, "c_attn"):
            parent_name = ".".join(name.split(".")[:-1])
            parent = model.get_submodule(parent_name) if parent_name else model

            triton_attn = TritonGPT2Attention(model_config)

            # Transpose is required: HF GPT-2 uses Conv1D with weight shape
            # [in, out], whereas nn.Linear stores [out, in].
            triton_attn.c_attn.weight.data = module.c_attn.weight.data.t().clone()
            triton_attn.c_attn.bias.data = module.c_attn.bias.data.clone()
            triton_attn.c_proj.weight.data = module.c_proj.weight.data.t().clone()
            triton_attn.c_proj.bias.data = module.c_proj.bias.data.clone()

            # Copy causal-mask buffers if the original module registered them.
            if hasattr(module, 'bias'):
                triton_attn.bias.data = module.bias.data.clone()
            if hasattr(module, 'masked_bias'):
                triton_attn.masked_bias.data = module.masked_bias.data.clone()

            setattr(parent, name.split(".")[-1], triton_attn)
            num_attention_replaced += 1

        # --- LayerNorm ---
        if triton_config.get("enable_layer_norm", True) and isinstance(module, nn.LayerNorm):
            parent_name = ".".join(name.split(".")[:-1])
            parent = model.get_submodule(parent_name) if parent_name else model

            triton_ln = TritonLayerNorm(module.normalized_shape[0], eps=module.eps)
            triton_ln.weight.data = module.weight.data.clone()
            triton_ln.bias.data = module.bias.data.clone()

            setattr(parent, name.split(".")[-1], triton_ln)
            num_layernorm_replaced += 1

        # --- MLP ---
        if triton_config.get("enable_mlp", True) and "mlp" in name and hasattr(module, "c_fc"):
            parent_name = ".".join(name.split(".")[:-1])
            parent = model.get_submodule(parent_name) if parent_name else model

            # enable_gelu lets the config toggle the fused GELU kernel.
            enable_gelu = triton_config.get("enable_gelu", True)
            triton_mlp = TritonGPT2MLP(model_config, enable_gelu=enable_gelu)

            # Same Conv1D -> Linear transpose as for attention above.
            triton_mlp.c_fc.weight.data = module.c_fc.weight.data.t().clone()
            triton_mlp.c_fc.bias.data = module.c_fc.bias.data.clone()
            triton_mlp.c_proj.weight.data = module.c_proj.weight.data.t().clone()
            triton_mlp.c_proj.bias.data = module.c_proj.bias.data.clone()

            setattr(parent, name.split(".")[-1], triton_mlp)
            num_mlp_replaced += 1

    print(f"\nTriton Module Replacement Summary:")
    print(f"  Attention modules replaced: {num_attention_replaced}")
    print(f"  LayerNorm modules replaced: {num_layernorm_replaced}")
    print(f"  MLP modules replaced: {num_mlp_replaced}")
    print()

    return model


def train(config: Dict[str, Any], rank: int, world_size: int, local_rank: int):
    """
    Run the full training loop on one (possibly distributed) process.

    Args:
        config: Parsed JSON config with "model", "training" and "performance"
            sections (and optionally "triton").
        rank: Global rank of this process; rank 0 does all logging and saving.
        world_size: Total number of processes; >1 enables DDP.
        local_rank: Local GPU index for this process.
    """
    # Reproducibility
    set_seed(42)

    # Load tokenizer and model
    model_config = config["model"]
    print(f"Loading model from {model_config['model_path']}")

    tokenizer = GPT2Tokenizer.from_pretrained(model_config["model_path"])
    tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default

    model = GPT2LMHeadModel.from_pretrained(model_config["model_path"])

    # Swap in Triton kernels.
    # NOTE(review): this flag is read from config["model"], while
    # replace_modules_with_triton() reads config["triton"]["use_triton_kernels"]
    # — confirm the two sections are kept in sync in the config files.
    if model_config.get("use_triton_kernels", True):
        print("Replacing modules with Triton kernels...")
        model = replace_modules_with_triton(model, config)

    # Move to GPU (falls back to CPU when CUDA is unavailable)
    device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Wrap with DDP for multi-GPU runs
    if world_size > 1:
        model = DDP(model, device_ids=[local_rank], output_device=local_rank,
                   find_unused_parameters=True)  # Triton kernels may not always run, so allow unused params

    # Dataset
    dataset = prepare_dataset(config, tokenizer)
    train_dataset = dataset["train"]

    # DataLoader
    training_config = config["training"]
    batch_size = training_config["per_device_train_batch_size"]

    sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank) if world_size > 1 else None

    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        sampler=sampler,
        shuffle=(sampler is None),  # DistributedSampler already shuffles
        num_workers=training_config.get("dataloader_num_workers", 4),
        pin_memory=training_config.get("dataloader_pin_memory", True),
    )

    # Optimizer
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=training_config["learning_rate"],
        betas=(training_config["adam_beta1"], training_config["adam_beta2"]),
        eps=training_config["adam_epsilon"],
        weight_decay=training_config["weight_decay"]
    )

    # LR scheduler (linear warmup + decay over all steps)
    num_epochs = training_config["num_train_epochs"]
    num_training_steps = len(train_loader) * num_epochs
    num_warmup_steps = int(num_training_steps * training_config["warmup_ratio"])

    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps
    )

    # Performance monitoring (rank 0 only)
    perf_monitor = None
    if rank == 0 and config["performance"]["enable_monitoring"]:
        perf_monitor = PerformanceMonitor(
            log_interval=config["performance"]["log_interval"],
            output_dir=training_config["output_dir"]
        )

    # Mixed precision: GradScaler is only needed for fp16 (bf16 does not
    # require loss scaling).
    use_amp = training_config.get("bf16", False) or training_config.get("fp16", False)
    scaler = torch.cuda.amp.GradScaler() if use_amp and training_config.get("fp16", False) else None

    # Training loop
    model.train()
    global_step = 0

    for epoch in range(num_epochs):
        if sampler:
            sampler.set_epoch(epoch)  # reshuffle shards each epoch

        epoch_iterator = tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs}", disable=rank!=0)

        for step, batch in enumerate(epoch_iterator):
            # Move batch to device
            batch = {k: v.to(device) for k, v in batch.items()}

            if perf_monitor:
                perf_monitor.start_step()

            # Forward pass (autocast when AMP is enabled)
            if use_amp:
                with torch.cuda.amp.autocast(dtype=torch.bfloat16 if training_config.get("bf16") else torch.float16):
                    outputs = model(**batch)
                    loss = outputs.loss
            else:
                outputs = model(**batch)
                loss = outputs.loss

            # Backward pass
            if scaler:
                scaler.scale(loss).backward()
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), training_config["max_grad_norm"])
                scale_before = scaler.get_scale()
                scaler.step(optimizer)
                scaler.update()
                # GradScaler skips optimizer.step() on inf/nan gradients and
                # shrinks the scale; stepping the LR scheduler in that case
                # would desynchronize it from the actual optimizer steps.
                if scaler.get_scale() >= scale_before:
                    scheduler.step()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), training_config["max_grad_norm"])
                optimizer.step()
                scheduler.step()

            optimizer.zero_grad()

            # Record performance stats
            if perf_monitor:
                seq_length = batch["input_ids"].shape[1]
                perf_monitor.end_step(loss.item(), batch_size, seq_length, global_step)

            global_step += 1

            # Periodic checkpointing (rank 0 only)
            if rank == 0 and global_step % training_config["save_steps"] == 0:
                save_dir = Path(training_config["output_dir"]) / f"checkpoint-{global_step}"
                save_dir.mkdir(parents=True, exist_ok=True)

                # Unwrap DDP before saving
                model_to_save = model.module if hasattr(model, 'module') else model
                model_to_save.save_pretrained(save_dir)
                tokenizer.save_pretrained(save_dir)
                print(f"Checkpoint saved to {save_dir}")

    # Save the final model
    if rank == 0:
        final_dir = Path(training_config["output_dir"]) / "final"
        final_dir.mkdir(parents=True, exist_ok=True)

        model_to_save = model.module if hasattr(model, 'module') else model
        model_to_save.save_pretrained(final_dir, safe_serialization=False)
        tokenizer.save_pretrained(final_dir)

        if perf_monitor:
            # Use the metrics_file name from the config file
            metrics_file = config["performance"].get("metrics_file", "metrics.json")
            perf_monitor.save_metrics(metrics_file)

        print(f"Training completed! Model saved to {final_dir}")

    # Tear down the distributed process group
    if world_size > 1:
        dist.destroy_process_group()

def main():
    """CLI entry point: parse arguments, set up distributed state, and train."""
    arg_parser = argparse.ArgumentParser(description="Triton Optimized Training")
    arg_parser.add_argument(
        "--config",
        type=str,
        default="config_triton.json",
        help="Path to config file",
    )
    cli_args = arg_parser.parse_args()

    # Load run configuration and initialize (possibly single-process) DDP state.
    run_config = load_config(cli_args.config)
    rank, world_size, local_rank = setup_distributed()

    # Only rank 0 prints the startup banner.
    if rank == 0:
        banner = "=" * 60
        print(banner)
        print("Triton Optimized Training")
        print(banner)
        print(f"World size: {world_size}")
        print(f"Model: {run_config['model']['name']}")
        print(f"Output: {run_config['training']['output_dir']}")
        print(f"Triton kernels enabled: {run_config['model'].get('use_triton_kernels', True)}")
        print(banner)

    # Launch the training loop.
    train(run_config, rank, world_size, local_rank)


# Run only when executed as a script (not when imported, e.g. by train_pytorch-style reuse).
if __name__ == "__main__":
    main()

