#!/usr/bin/env python3
"""
增强版预训练脚本 - 支持DDP、FSDP、混合精度等
"""

import os
import sys
import argparse
import time
import math
import warnings
import torch
import torch.distributed as dist
from torch import optim, nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, DistributedSampler
from contextlib import nullcontext

# 添加当前目录到Python路径
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from simple_model import SimpleLLMForCausalLM
from hf_tokenizer import MiniMindTokenizer
from simple_dataset import SimplePretrainDataset
from model_config import get_minimind2_small_config, get_minimind2_config, get_minimind2_moe_config
from advanced_trainer import AdvancedTrainer

warnings.filterwarnings('ignore')

def Logger(content, ddp=False):
    """Distributed-aware logger.

    Under DDP (``ddp=True`` with an initialized process group) only rank 0
    prints; in every other situation the message prints unconditionally.
    """
    is_nonzero_rank = ddp and dist.is_initialized() and dist.get_rank() != 0
    if not is_nonzero_rank:
        print(content)

def get_lr(current_step, total_steps, lr):
    """Cosine learning-rate schedule with a floor of ``lr / 10``.

    Decays smoothly from ``1.1 * lr`` at step 0 down to ``0.1 * lr`` at the
    final step.
    """
    floor = lr / 10
    cosine_term = 1 + math.cos(math.pi * current_step / total_steps)
    return floor + 0.5 * lr * cosine_term

def train_epoch(model, optimizer, scaler, train_loader, epoch, args, config, tokenizer, wandb=None):
    """Run one training epoch with gradient accumulation and AMP.

    Applies a per-step cosine LR schedule, scales the loss by
    ``args.accumulation_steps``, clips gradients before each optimizer step,
    logs every ``args.log_interval`` steps, and writes a HuggingFace-style
    checkpoint every ``args.save_interval`` steps (main process only).

    Args:
        model: the model (possibly DDP-wrapped); must return a dict with 'loss'.
        optimizer: optimizer whose LR is overwritten each step.
        scaler: torch.cuda.amp.GradScaler (may be disabled).
        train_loader: DataLoader yielding dicts with 'input_ids' and 'labels'.
        epoch: zero-based epoch index.
        args: parsed CLI namespace (device, epochs, accumulation_steps, ...).
        config: model config object, serialized into each checkpoint.
        tokenizer: tokenizer, saved alongside each checkpoint.
        wandb: optional wandb module for metric logging.
    """
    import json

    model.train()
    start_time = time.time()
    iter_per_epoch = len(train_loader)

    # Fix: compute the DDP / main-process flags once, up front. Previously
    # `ddp` was assigned only inside the logging branch, so the checkpoint
    # branch silently depended on the log branch having run first.
    ddp = int(os.environ.get("RANK", -1)) != -1
    is_main_process = not ddp or not dist.is_initialized() or dist.get_rank() == 0

    # Mixed-precision context: autocast only on CUDA.
    device_type = "cuda" if "cuda" in str(args.device) else "cpu"
    ctx = nullcontext() if device_type == "cpu" else torch.cuda.amp.autocast()

    total_loss = 0.0
    num_updates = 0  # completed optimizer steps, for a correct running average

    for step, batch in enumerate(train_loader):
        input_ids = batch['input_ids'].to(args.device)
        labels = batch['labels'].to(args.device)

        # Per-step cosine LR schedule.
        lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch, args.learning_rate)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # Forward pass; loss is scaled down for gradient accumulation.
        with ctx:
            outputs = model(input_ids=input_ids, labels=labels)
            loss = outputs['loss'] / args.accumulation_steps

        scaler.scale(loss).backward()

        # Optimizer step every `accumulation_steps` micro-batches.
        if (step + 1) % args.accumulation_steps == 0:
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)

            scaler.step(optimizer)
            total_loss += loss.item() * args.accumulation_steps
            num_updates += 1
            scaler.update()
            optimizer.zero_grad(set_to_none=True)

        # Periodic logging.
        if step % args.log_interval == 0:
            spend_time = time.time() - start_time
            # Fix: total_loss is accumulated only on optimizer steps, so the
            # average must divide by the update count, not the micro-batch
            # index (the old `total_loss / step` understated the loss by
            # roughly a factor of accumulation_steps).
            avg = total_loss / max(num_updates, 1)
            Logger(f'Epoch:[{epoch+1}/{args.epochs}]({step}/{iter_per_epoch}) '
                  f'loss:{loss.item()*args.accumulation_steps:.4f} '
                  f'avg_loss:{avg} '
                  f'lr:{lr:.8f} '
                  f'time:{spend_time/(step+1)*iter_per_epoch//60:.1f}min', ddp)

            if wandb and is_main_process:
                wandb.log({
                    "loss": loss.item() * args.accumulation_steps,
                    "lr": lr,
                    "epoch": epoch,
                    "step": step
                })

        # Periodic checkpointing (main process only).
        if (step + 1) % args.save_interval == 0 and is_main_process:
            model.eval()
            checkpoint_dir = os.path.join(args.output_dir, "pretrain_checkpoints")
            os.makedirs(checkpoint_dir, exist_ok=True)

            # Unwrap DDP to save the raw module's weights.
            if isinstance(model, DistributedDataParallel):
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            # HuggingFace-compatible checkpoint layout.
            # NOTE(review): the directory name only encodes `step`, so a
            # later epoch overwrites an earlier one at the same step index.
            checkpoint_path = os.path.join(checkpoint_dir, f"checkpoint-{step}")
            os.makedirs(checkpoint_path, exist_ok=True)

            torch.save(state_dict, os.path.join(checkpoint_path, "pytorch_model.bin"))

            # Persist model config plus (device-stringified) training args.
            config_dict = {
                "model_type": "llm",
                "config": {
                    k: v for k, v in config.__dict__.items()
                    if not k.startswith('_') and not callable(v)
                },
                "training_args": {
                    k: str(v) if hasattr(v, '__class__') and 'device' in str(type(v)) else v
                    for k, v in vars(args).items()
                }
            }
            with open(os.path.join(checkpoint_path, "config.json"), "w") as f:
                json.dump(config_dict, f, indent=2)

            tokenizer.save_pretrained(checkpoint_path)

            # Optimizer state, to allow resuming.
            torch.save(optimizer.state_dict(), os.path.join(checkpoint_path, "optimizer.pt"))

            Logger(f"检查点已保存到: {checkpoint_path}", ddp)
            model.train()

def init_model(config, args):
    """Build the tokenizer and model, move the model to ``args.device``,
    and log parameter counts (rank 0 only under DDP).

    Returns:
        (model, tokenizer) tuple.
    """
    tokenizer = MiniMindTokenizer()
    model = SimpleLLMForCausalLM(config).to(args.device)

    param_count = sum(p.numel() for p in model.parameters())
    trainable_count = sum(
        p.numel() for p in model.parameters() if p.requires_grad
    )

    running_ddp = int(os.environ.get("RANK", -1)) != -1
    Logger(f'模型总参数量: {param_count / 1e6:.1f}M', running_ddp)
    Logger(f'可训练参数量: {trainable_count / 1e6:.1f}M', running_ddp)

    return model, tokenizer

def init_distributed_mode():
    """Join the NCCL process group and pin this process to its local GPU.

    Expects the torchrun environment variable LOCAL_RANK to be set.
    """
    dist.init_process_group(backend="nccl")
    device_index = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(device_index)

def setup_fsdp(model, args):
    """Wrap ``model`` in FullyShardedDataParallel with bf16 mixed precision.

    Returns the FSDP-wrapped model, or the model unchanged when FSDP is
    unavailable in the installed torch build.
    """
    try:
        from functools import partial

        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
        from torch.distributed.fsdp import MixedPrecision
        from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy

        # Fix: `transformer_auto_wrap_policy` is a predicate taking
        # (module, recurse, nonwrapped_numel, ..., transformer_layer_cls);
        # it must be bound with functools.partial, not called directly.
        # The old direct call raised TypeError, which the `except
        # ImportError` below did not catch.
        # NOTE(review): the wrapped layer class should probably be this
        # project's own transformer block rather than
        # nn.TransformerEncoderLayer — TODO confirm against simple_model.
        auto_wrap_policy = partial(
            transformer_auto_wrap_policy,
            transformer_layer_cls={torch.nn.TransformerEncoderLayer},
        )

        # Keep params, gradient reduction, and buffers in bf16.
        mixed_precision_policy = MixedPrecision(
            param_dtype=torch.bfloat16,
            reduce_dtype=torch.bfloat16,
            buffer_dtype=torch.bfloat16,
        )

        model = FSDP(
            model,
            auto_wrap_policy=auto_wrap_policy,
            mixed_precision=mixed_precision_policy,
            device_id=torch.cuda.current_device(),
        )

        Logger("FSDP已启用")
        return model
    except ImportError:
        Logger("FSDP不可用，使用标准DDP")
        return model

def main():
    """Parse CLI arguments and run the full pretraining pipeline:
    distributed setup, model/data construction, training, and final save."""
    parser = argparse.ArgumentParser(description="增强版预训练")
    parser.add_argument("--output_dir", type=str, default="./pretrain_output")
    parser.add_argument("--data_path", type=str, 
                       default="/Users/sd/Desktop/mycode/myalgo/milvus/minimind/my_llm_implementation/datasets/pretrain_hq.jsonl")
    parser.add_argument("--config", type=str, default="minimind2", 
                       choices=["minimind2-small", "minimind2", "minimind2-moe"])
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--batch_size", type=int, default=2)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--device", type=str, default="cuda:0" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--dtype", type=str, default="float")
    parser.add_argument("--use_wandb", action="store_true")
    parser.add_argument("--wandb_project", type=str, default="MiniMind-Pretrain-Advanced")
    parser.add_argument("--num_workers", type=int, default=1)
    parser.add_argument("--ddp", action="store_true")
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--accumulation_steps", type=int, default=8)
    parser.add_argument("--grad_clip", type=float, default=1.0)
    parser.add_argument("--warmup_iters", type=int, default=0)
    parser.add_argument("--log_interval", type=int, default=100)
    parser.add_argument("--save_interval", type=int, default=1000)
    parser.add_argument("--max_seq_len", type=int, default=512)
    parser.add_argument("--local_rank", type=int, default=-1)

    args = parser.parse_args()

    # Distributed setup is only possible when launched via torchrun
    # (RANK present in the environment).
    if args.ddp and int(os.environ.get("RANK", -1)) != -1:
        init_distributed_mode()
        local_rank = int(os.environ["LOCAL_RANK"])
        args.device = f"cuda:{local_rank}"

    args.device = torch.device(args.device)

    os.makedirs(args.output_dir, exist_ok=True)

    # Base seed; per-rank offsets below decorrelate ranks under DDP.
    base_seed = 1337
    torch.manual_seed(base_seed)
    torch.cuda.manual_seed(base_seed)

    is_distributed = dist.is_initialized()

    # Fix: everything distributed below keys off `use_ddp`, so passing
    # --ddp without a torchrun environment no longer crashes trying to
    # build DistributedSampler / DistributedDataParallel without an
    # initialized process group.
    use_ddp = args.ddp and is_distributed

    if use_ddp:
        rank = dist.get_rank()
        torch.manual_seed(base_seed + rank)
        torch.cuda.manual_seed(base_seed + rank)

    # WandB on the main process only.
    wandb = None
    if args.use_wandb and (not use_ddp or dist.get_rank() == 0):
        import wandb
        wandb.init(project=args.wandb_project, name=f"pretrain-{args.config}")

    # Model configuration.
    config_map = {
        "minimind2-small": get_minimind2_small_config,
        "minimind2": get_minimind2_config,
        "minimind2-moe": get_minimind2_moe_config
    }
    config = config_map[args.config]()

    model, tokenizer = init_model(config, args)

    # Dataset and loader. NOTE(review): with no sampler, shuffle=False means
    # single-process runs see the data in file order every epoch — confirm
    # this is intentional.
    train_ds = SimplePretrainDataset(
        data_path=args.data_path,
        tokenizer=tokenizer,
        max_length=args.max_seq_len
    )

    train_sampler = DistributedSampler(train_ds) if use_ddp else None
    train_loader = DataLoader(
        train_ds,
        batch_size=args.batch_size,
        pin_memory=True,
        drop_last=False,
        shuffle=False,
        num_workers=args.num_workers,
        sampler=train_sampler
    )

    optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate)

    # GradScaler is active only for reduced-precision training.
    scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype in ['float16', 'bfloat16']))

    # Distributed wrapping: DDP takes precedence over FSDP.
    if use_ddp:
        # pos_cis (rotary-embedding cache) is excluded from DDP syncing.
        model._ddp_params_and_buffers_to_ignore = {"pos_cis"}
        model = DistributedDataParallel(model, device_ids=[args.device.index])
    elif args.fsdp:
        model = setup_fsdp(model, args)

    # Training loop.
    Logger(f"开始预训练，共{args.epochs}个epoch")
    for epoch in range(args.epochs):
        if train_sampler is not None:
            train_sampler.set_epoch(epoch)
        train_epoch(model, optimizer, scaler, train_loader, epoch, args, config, tokenizer, wandb)

    # Save the final model on the main process only.
    if not use_ddp or dist.get_rank() == 0:
        final_model_path = os.path.join(args.output_dir, "final_model")
        os.makedirs(final_model_path, exist_ok=True)

        if isinstance(model, DistributedDataParallel):
            state_dict = model.module.state_dict()
        else:
            state_dict = model.state_dict()

        torch.save(state_dict, os.path.join(final_model_path, "pytorch_model.bin"))

        # Persist model config plus (device-stringified) training args.
        import json
        config_dict = {
            "model_type": "llm",
            "config": {
                k: v for k, v in config.__dict__.items() 
                if not k.startswith('_') and not callable(v)
            },
            "training_args": {
                k: str(v) if hasattr(v, '__class__') and 'device' in str(type(v)) else v
                for k, v in vars(args).items()
            }
        }
        with open(os.path.join(final_model_path, "config.json"), "w") as f:
            json.dump(config_dict, f, indent=2)

        tokenizer.save_pretrained(final_model_path)

        Logger(f"最终模型已保存到: {final_model_path}")

    if wandb:
        wandb.finish()

# Script entry point: run the pretraining pipeline when executed directly.
if __name__ == "__main__":
    main()
