#!/usr/bin/env python3
"""
完全模仿官方实现的预训练脚本
"""

import os
import sys
import argparse
import time
import math
import warnings
import torch
import torch.distributed as dist
from torch import optim, nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, DistributedSampler
from contextlib import nullcontext
import json

# 添加当前目录到Python路径
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from simple_model import SimpleLLMForCausalLM
from hf_tokenizer import MiniMindTokenizer
from simple_dataset import SimplePretrainDataset
from model_config import get_minimind2_small_config, get_minimind2_config, get_minimind2_moe_config

warnings.filterwarnings('ignore')

def Logger(content, ddp=False):
    """Print *content*; under distributed training only rank 0 prints.

    Non-DDP runs (or runs where the process group was never initialized)
    always print.
    """
    rank0_or_single = (not ddp) or (not dist.is_initialized()) or dist.get_rank() == 0
    if rank0_or_single:
        print(content)

def get_lr(current_step, total_steps, lr):
    """Cosine learning-rate schedule with a floor of lr/10 (matches the
    official implementation referenced by this script)."""
    cosine_term = math.cos(math.pi * current_step / total_steps)
    floor = lr / 10
    return floor + 0.5 * lr * (1 + cosine_term)

def train_epoch(epoch, wandb, model, optimizer, scaler, train_loader, args, config, tokenizer):
    """Run one training epoch, mirroring the official MiniMind pretrain loop.

    Forward passes only `input_ids`; the cross-entropy loss is computed
    manually per token and weighted by `loss_mask` so padding does not
    contribute. Gradients are accumulated over `args.accumulation_steps`
    micro-batches before an optimizer step. Periodically logs and saves a
    half-precision checkpoint (rank 0 only under DDP).

    Args:
        epoch: zero-based epoch index (used for LR scheduling and logging).
        wandb: initialized wandb module, or None to disable logging.
        model: the causal LM (possibly wrapped in DistributedDataParallel).
        optimizer: optimizer whose param-group LRs are overwritten each step.
        scaler: torch.cuda.amp.GradScaler for mixed-precision updates.
        train_loader: yields dicts with 'input_ids', 'labels', 'loss_mask'.
        args: parsed CLI namespace (device, epochs, learning_rate, ...).
        config: model config; `use_moe` and `hidden_size` name the checkpoint.
        tokenizer: unused here; kept for interface parity with callers.
    """
    loss_fct = nn.CrossEntropyLoss(reduction='none')
    start_time = time.time()
    iter_per_epoch = len(train_loader)

    # Bug fix: this flag was previously assigned only inside the logging
    # branch below, so the checkpoint branch could raise NameError if it
    # ever ran before the first log step. Compute it once up front.
    ddp = int(os.environ.get("RANK", -1)) != -1

    # Mixed-precision context: no-op on CPU, autocast on CUDA.
    # NOTE(review): autocast() here defaults to float16 and ignores
    # args.dtype — confirm whether bfloat16 autocast was intended.
    device_type = "cuda" if "cuda" in str(args.device) else "cpu"
    ctx = nullcontext() if device_type == "cpu" else torch.cuda.amp.autocast()

    for step, batch in enumerate(train_loader):
        # Move the batch to the training device.
        X = batch['input_ids'].to(args.device)
        Y = batch['labels'].to(args.device)
        loss_mask = batch['loss_mask'].to(args.device)

        # Per-step cosine LR schedule over the whole run.
        lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch, args.learning_rate)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # Forward pass — matches the official implementation.
        with ctx:
            # Pass only input_ids; labels are handled manually below.
            res = model(X)

            # Token-level cross entropy, reshaped back to (batch, seq).
            loss = loss_fct(
                res['logits'].view(-1, res['logits'].size(-1)),
                Y.view(-1)
            ).view(Y.size())

            # Average only over unmasked (non-padding) tokens.
            loss = (loss * loss_mask).sum() / loss_mask.sum()

            # Add MoE auxiliary load-balancing loss when the model returns one.
            if 'aux_loss' in res and res['aux_loss'] is not None:
                loss += res['aux_loss']

            # Scale down so accumulated gradients average across micro-batches.
            loss = loss / args.accumulation_steps

        # Backward pass through the grad scaler.
        scaler.scale(loss).backward()

        if (step + 1) % args.accumulation_steps == 0:
            # Unscale before clipping so the clip threshold is in true units.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)

            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad(set_to_none=True)

        # Periodic logging — format matches the official implementation.
        if step % args.log_interval == 0:
            spend_time = time.time() - start_time
            Logger(
                'Epoch:[{}/{}]({}/{}) loss:{:.3f} lr:{:.12f} epoch_Time:{}min:'.format(
                    epoch + 1,
                    args.epochs,
                    step,
                    iter_per_epoch,
                    loss.item() * args.accumulation_steps,
                    optimizer.param_groups[-1]['lr'],
                    spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60), ddp)

            if (wandb is not None) and (not ddp or not dist.is_initialized() or dist.get_rank() == 0):
                wandb.log({"loss": loss.item() * args.accumulation_steps,
                           "lr": optimizer.param_groups[-1]['lr'],
                           "epoch_Time": spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60})

        # Periodic checkpoint — naming matches the official implementation.
        if (step + 1) % args.save_interval == 0 and (not ddp or not dist.is_initialized() or dist.get_rank() == 0):
            model.eval()
            moe_path = '_moe' if config.use_moe else ''
            ckp = f'{args.output_dir}/pretrain_{config.hidden_size}{moe_path}.pth'

            # Unwrap DDP so keys are not prefixed with "module.".
            if isinstance(model, torch.nn.parallel.DistributedDataParallel):
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            # Save in half precision — matches the official implementation.
            state_dict = {k: v.half() for k, v in state_dict.items()}
            torch.save(state_dict, ckp)
            Logger(f"检查点已保存到: {ckp}")
            model.train()

def init_model(config, args):
    """Construct the tokenizer and model, move the model to the target
    device, and log the trainable parameter count (rank 0 only under DDP).

    Returns:
        (model, tokenizer) tuple.
    """
    tokenizer = MiniMindTokenizer()
    model = SimpleLLMForCausalLM(config).to(args.device)

    ddp = int(os.environ.get("RANK", -1)) != -1
    total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    Logger(f'LLM可训练总参数量：{total_params / 1e6:.3f} 百万', ddp)

    return model, tokenizer

def init_distributed_mode():
    """Join the NCCL process group and bind this process to its local GPU.

    Reads LOCAL_RANK from the environment (set by torchrun/launch).

    Returns:
        The local rank as an int.
    """
    dist.init_process_group(backend="nccl")
    rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(rank)
    return rank

def main():
    """CLI entry point: parse arguments, set up the (optionally distributed)
    training environment, run pretraining, and save the final model in half
    precision together with its config and tokenizer."""
    parser = argparse.ArgumentParser(description="完全模仿官方实现的预训练脚本")
    
    # Data arguments
    parser.add_argument("--data_path", type=str, default="./datasets/better_pretrain_data.jsonl", help="预训练数据路径")
    parser.add_argument("--output_dir", type=str, default="./pretrain_official_output", help="输出目录")
    parser.add_argument("--config", type=str, default="minimind2-small", choices=["minimind2-small", "minimind2", "minimind2-moe"], help="模型配置")
    
    # Training arguments — stated to match the official implementation
    parser.add_argument("--batch_size", type=int, default=32, help="批次大小")
    parser.add_argument("--max_seq_len", type=int, default=512, help="最大序列长度")
    parser.add_argument("--epochs", type=int, default=1, help="训练轮数")
    parser.add_argument("--learning_rate", type=float, default=5.5e-4, help="学习率")
    parser.add_argument("--accumulation_steps", type=int, default=8, help="梯度累积步数")
    parser.add_argument("--grad_clip", type=float, default=1.0, help="梯度裁剪")
    parser.add_argument("--dtype", type=str, default="bfloat16", choices=["float32", "float16", "bfloat16"], help="数据类型")
    
    # Checkpointing and logging arguments
    parser.add_argument("--save_interval", type=int, default=1000, help="保存间隔")
    parser.add_argument("--log_interval", type=int, default=100, help="日志间隔")
    parser.add_argument("--num_workers", type=int, default=4, help="数据加载器工作进程数")
    
    # Distributed training arguments
    parser.add_argument("--ddp", action="store_true", help="使用DDP")
    
    # WandB arguments
    parser.add_argument("--use_wandb", action="store_true", help="使用WandB")
    parser.add_argument("--wandb_project", type=str, default="minimind-pretrain", help="WandB项目名")
    
    args = parser.parse_args()
    
    # Select the device; under --ddp this also joins the process group
    # and pins the process to its LOCAL_RANK GPU.
    if torch.cuda.is_available():
        if args.ddp:
            local_rank = init_distributed_mode()
            args.device = torch.device(f"cuda:{local_rank}")
        else:
            args.device = torch.device("cuda:0")
    else:
        args.device = torch.device("cpu")
    
    # Create the output directory
    os.makedirs(args.output_dir, exist_ok=True)
    
    # Seed RNGs for reproducibility (torch.cuda.manual_seed is a no-op
    # when CUDA is unavailable)
    base_seed = 1337
    torch.manual_seed(base_seed)
    torch.cuda.manual_seed(base_seed)
    
    # Check whether we are actually in a distributed process group
    is_distributed = dist.is_initialized()
    
    if args.ddp and is_distributed:
        # Offset the seed per rank so workers draw different random streams
        rank = dist.get_rank()
        torch.manual_seed(base_seed + rank)
        torch.cuda.manual_seed(base_seed + rank)
    
    # Initialize WandB on rank 0 only. NOTE(review): the local
    # `import wandb` deliberately rebinds the `wandb = None` local above;
    # non-rank-0 processes keep wandb=None.
    wandb = None
    if args.use_wandb and (not args.ddp or not is_distributed or dist.get_rank() == 0):
        import wandb
        wandb.init(project=args.wandb_project, name=f"pretrain-official-{args.config}")
    
    # Resolve the model configuration factory by name
    config_map = {
        "minimind2-small": get_minimind2_small_config,
        "minimind2": get_minimind2_config,
        "minimind2-moe": get_minimind2_moe_config
    }
    config = config_map[args.config]()
    
    # Build the model and tokenizer
    model, tokenizer = init_model(config, args)
    
    # Build the dataset
    train_ds = SimplePretrainDataset(
        data_path=args.data_path,
        tokenizer=tokenizer,
        max_length=args.max_seq_len
    )
    
    # Build the data loader. NOTE(review): shuffle=False even without DDP —
    # presumably intentional to mirror the official script; confirm.
    train_sampler = DistributedSampler(train_ds) if args.ddp else None
    train_loader = DataLoader(
        train_ds,
        batch_size=args.batch_size,
        pin_memory=True,
        drop_last=False,
        shuffle=False,
        num_workers=args.num_workers,
        sampler=train_sampler
    )
    
    # Optimizer — hyperparameters stated to match the official implementation
    optimizer = optim.AdamW(
        model.parameters(), 
        lr=args.learning_rate,
        weight_decay=0.01,
        betas=(0.9, 0.95)
    )
    
    # Gradient scaler for mixed precision. NOTE(review): enabled for
    # bfloat16 too, where scaling is normally unnecessary — confirm intended.
    scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype in ['float16', 'bfloat16']))
    
    # Wrap the model for distributed data parallelism; pos_cis buffers are
    # excluded from DDP broadcast/sync.
    if args.ddp:
        model._ddp_params_and_buffers_to_ignore = {"pos_cis"}
        model = DistributedDataParallel(model, device_ids=[args.device.index])
    
    # Training loop
    Logger(f"开始预训练，共{args.epochs}个epoch")
    for epoch in range(args.epochs):
        if args.ddp:
            # Reshuffle the per-rank data partition each epoch
            train_sampler.set_epoch(epoch)
        train_epoch(epoch, wandb, model, optimizer, scaler, train_loader, args, config, tokenizer)
    
    # Save the final model (rank 0 only under DDP)
    if not args.ddp or not dist.is_initialized() or dist.get_rank() == 0:
        final_model_path = os.path.join(args.output_dir, "final_model")
        os.makedirs(final_model_path, exist_ok=True)
        
        # Unwrap DDP so state-dict keys are not prefixed with "module."
        if isinstance(model, DistributedDataParallel):
            state_dict = model.module.state_dict()
        else:
            state_dict = model.state_dict()
        
        # Save weights in half precision
        state_dict = {k: v.half() for k, v in state_dict.items()}
        torch.save(state_dict, os.path.join(final_model_path, "pytorch_model.bin"))
        
        # Save the model config plus the training arguments (devices are
        # stringified so the dict is JSON-serializable)
        config_dict = {
            "model_type": "llm",
            "config": {
                k: v for k, v in config.__dict__.items() 
                if not k.startswith('_') and not callable(v)
            },
            "training_args": {
                k: str(v) if hasattr(v, '__class__') and 'device' in str(type(v)) else v
                for k, v in vars(args).items()
            }
        }
        with open(os.path.join(final_model_path, "config.json"), "w") as f:
            json.dump(config_dict, f, indent=2)
        
        # Save the tokenizer alongside the model
        tokenizer.save_pretrained(final_model_path)
        
        Logger(f"最终模型已保存到: {final_model_path}")
    
    if wandb:
        wandb.finish()

if __name__ == "__main__":
    main()
