#!/usr/bin/env python3
"""
修复的预训练脚本 - 使用loss_mask
"""

import os
import sys
import argparse
import time
import math
import warnings
import torch
import torch.distributed as dist
from torch import optim, nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, DistributedSampler
from contextlib import nullcontext
import json

# Add this file's directory to the Python path so sibling modules import cleanly
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from simple_model import SimpleLLMForCausalLM
from hf_tokenizer import MiniMindTokenizer
from simple_dataset import SimplePretrainDataset
from model_config import get_minimind2_small_config, get_minimind2_config, get_minimind2_moe_config
from advanced_trainer import AdvancedTrainer

warnings.filterwarnings('ignore')

def Logger(content, ddp=False):
    """Print *content*, restricted to rank 0 when running distributed.

    In non-DDP mode (or before the process group is initialized) every
    process prints; once DDP is active only the rank-0 process does.
    """
    is_main_process = (not ddp) or (not dist.is_initialized()) or dist.get_rank() == 0
    if is_main_process:
        print(content)

def get_lr(current_step, total_steps, lr):
    """Warmup + cosine-decay learning-rate schedule.

    The first 10% of *total_steps* ramp linearly from 0 up to *lr*;
    the remaining steps follow a cosine decay from *lr* down to 0.
    """
    warmup_steps = 0.1 * total_steps
    if current_step >= warmup_steps:
        # Cosine-decay phase
        frac = (current_step - warmup_steps) / (total_steps - warmup_steps)
        return 0.5 * lr * (1.0 + math.cos(math.pi * frac))
    # Linear warmup phase
    return lr * (current_step / warmup_steps)

def train_epoch(model, optimizer, scaler, train_loader, epoch, args, config, tokenizer, wandb=None):
    """Train the model for one epoch, masking the loss with loss_mask.

    Only token positions where ``loss_mask`` is non-zero contribute to the
    loss. Supports gradient accumulation, AMP via the supplied scaler,
    periodic logging, and periodic checkpointing (main process only).

    Args:
        model: the (possibly DDP-wrapped) causal LM; must return a dict
            with a 'logits' entry.
        optimizer: optimizer whose lr is overwritten each step by get_lr.
        scaler: torch.cuda.amp.GradScaler (may be disabled).
        train_loader: DataLoader yielding dicts with 'input_ids',
            'labels' and 'loss_mask' tensors.
        epoch: zero-based epoch index.
        args: parsed CLI namespace (device, epochs, learning_rate, ...).
        config: model config serialized into each checkpoint.
        tokenizer: saved alongside each checkpoint.
        wandb: optional wandb module for metric logging.
    """
    model.train()
    loss_fct = nn.CrossEntropyLoss(reduction='none')
    start_time = time.time()
    iter_per_epoch = len(train_loader)

    # Bug fix: compute the DDP flag once up front. The original assigned
    # `ddp` only inside the logging branch, so the checkpoint branch relied
    # on step 0 always hitting the log branch to avoid a NameError.
    ddp = int(os.environ.get("RANK", -1)) != -1
    is_main = not ddp or not dist.is_initialized() or dist.get_rank() == 0

    # Mixed-precision autocast context (no-op on CPU)
    device_type = "cuda" if "cuda" in str(args.device) else "cpu"
    ctx = nullcontext() if device_type == "cpu" else torch.cuda.amp.autocast()

    total_loss = 0.0
    valid_steps = 0

    for step, batch in enumerate(train_loader):
        # Move the batch to the target device
        input_ids = batch['input_ids'].to(args.device)
        labels = batch['labels'].to(args.device)
        loss_mask = batch['loss_mask'].to(args.device)  # key fix: honor loss_mask

        # Per-step learning-rate schedule (warmup + cosine decay)
        lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch, args.learning_rate)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # Forward pass
        with ctx:
            outputs = model(input_ids=input_ids, labels=labels)
            logits = outputs['logits']

            # Shift so that position i predicts token i+1
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_loss_mask = loss_mask[..., 1:].contiguous()

            # Flatten for token-level cross entropy
            shift_logits = shift_logits.view(-1, shift_logits.size(-1))
            shift_labels = shift_labels.view(-1)
            shift_loss_mask = shift_loss_mask.view(-1)

            # Per-position losses
            losses = loss_fct(shift_logits, shift_labels)

            # Masked mean. Bug fix: clamp the denominator so a batch whose
            # mask is all zeros yields loss 0 instead of NaN (the original
            # divided by shift_loss_mask.sum() unguarded).
            masked_losses = losses * shift_loss_mask
            loss = masked_losses.sum() / shift_loss_mask.sum().clamp(min=1)
            loss = loss / args.accumulation_steps

        # Backward pass (scaled when AMP/fp16 is active)
        scaler.scale(loss).backward()

        if (step + 1) % args.accumulation_steps == 0:
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)

            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad(set_to_none=True)

            total_loss += loss.item() * args.accumulation_steps
            valid_steps += 1

        # Periodic logging
        if step % args.log_interval == 0:
            spend_time = time.time() - start_time
            avg_loss = total_loss / max(valid_steps, 1)

            Logger(f'Epoch:[{epoch+1}/{args.epochs}]({step}/{iter_per_epoch}) '
                  f'loss:{loss.item()*args.accumulation_steps:.4f} '
                  f'avg_loss:{avg_loss:.4f} '
                  f'lr:{lr:.8f} '
                  f'time:{spend_time/(step+1)*iter_per_epoch//60:.1f}min', ddp)

            if wandb and is_main:
                wandb.log({
                    "loss": loss.item() * args.accumulation_steps,
                    "avg_loss": avg_loss,
                    "lr": lr,
                    "epoch": epoch,
                    "step": step
                })

        # Periodic checkpointing (main process only)
        if (step + 1) % args.save_interval == 0 and is_main:
            model.eval()
            checkpoint_dir = os.path.join(args.output_dir, "pretrain_checkpoints")
            os.makedirs(checkpoint_dir, exist_ok=True)

            # Unwrap DDP before grabbing the state dict
            if isinstance(model, DistributedDataParallel):
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            # HuggingFace-style checkpoint directory
            checkpoint_path = os.path.join(checkpoint_dir, f"checkpoint-{step}")
            os.makedirs(checkpoint_path, exist_ok=True)

            # Model weights
            torch.save(state_dict, os.path.join(checkpoint_path, "pytorch_model.bin"))

            # Config + training args (device-like objects stringified for JSON)
            config_dict = {
                "model_type": "llm",
                "config": {
                    k: v for k, v in config.__dict__.items()
                    if not k.startswith('_') and not callable(v)
                },
                "training_args": {
                    k: str(v) if hasattr(v, '__class__') and 'device' in str(type(v)) else v
                    for k, v in vars(args).items()
                }
            }
            with open(os.path.join(checkpoint_path, "config.json"), "w") as f:
                json.dump(config_dict, f, indent=2)

            # Tokenizer
            tokenizer.save_pretrained(checkpoint_path)

            # Optimizer state (enables training resumption)
            optimizer_path = os.path.join(checkpoint_path, "optimizer.pt")
            torch.save(optimizer.state_dict(), optimizer_path)

            Logger(f"检查点已保存到: {checkpoint_path}")
            model.train()

def init_model(config, args):
    """Build the tokenizer and model, move the model to the target device,
    and log parameter counts.

    Returns:
        Tuple of (model, tokenizer).
    """
    tokenizer = MiniMindTokenizer()
    model = SimpleLLMForCausalLM(config).to(args.device)

    param_total = sum(p.numel() for p in model.parameters())
    param_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)

    is_ddp = int(os.environ.get("RANK", -1)) != -1
    Logger(f'模型总参数量: {param_total / 1e6:.1f}M', is_ddp)
    Logger(f'可训练参数量: {param_trainable / 1e6:.1f}M', is_ddp)

    return model, tokenizer

def init_distributed_mode():
    """Initialize the NCCL process group and pin this process to its GPU.

    Returns:
        The local rank read from the LOCAL_RANK environment variable.
    """
    rank = int(os.environ["LOCAL_RANK"])
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(rank)
    return rank

def setup_fsdp(model, args):
    """Wrap *model* in FullyShardedDataParallel with bf16 mixed precision.

    Each TransformerBlock becomes its own FSDP unit via the transformer
    auto-wrap policy.
    """
    import functools

    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp import MixedPrecision
    from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
    from model_components import TransformerBlock

    wrap_policy = functools.partial(
        transformer_auto_wrap_policy,
        transformer_layer_cls={TransformerBlock},
    )
    bf16_policy = MixedPrecision(
        param_dtype=torch.bfloat16,
        reduce_dtype=torch.bfloat16,
        buffer_dtype=torch.bfloat16,
    )
    return FSDP(model, auto_wrap_policy=wrap_policy, mixed_precision=bf16_policy)

def main():
    """Entry point: parse CLI args, build model/data/optimizer, run the
    pretraining loop, and save the final model (main process only)."""
    parser = argparse.ArgumentParser(description="修复的预训练脚本 - 使用loss_mask")

    # Data arguments
    parser.add_argument("--data_path", type=str, default="./datasets/better_pretrain_data.jsonl", help="预训练数据路径")
    parser.add_argument("--output_dir", type=str, default="./pretrain_fixed_output", help="输出目录")
    parser.add_argument("--config", type=str, default="minimind2-small", choices=["minimind2-small", "minimind2", "minimind2-moe"], help="模型配置")

    # Training arguments
    parser.add_argument("--batch_size", type=int, default=8, help="批次大小")
    parser.add_argument("--max_seq_len", type=int, default=512, help="最大序列长度")
    parser.add_argument("--epochs", type=int, default=3, help="训练轮数")
    parser.add_argument("--learning_rate", type=float, default=5e-4, help="学习率")
    parser.add_argument("--accumulation_steps", type=int, default=4, help="梯度累积步数")
    parser.add_argument("--grad_clip", type=float, default=1.0, help="梯度裁剪")
    parser.add_argument("--dtype", type=str, default="bfloat16", choices=["float32", "float16", "bfloat16"], help="数据类型")

    # Saving / logging arguments
    parser.add_argument("--save_interval", type=int, default=50, help="保存间隔")
    parser.add_argument("--log_interval", type=int, default=10, help="日志间隔")
    parser.add_argument("--num_workers", type=int, default=4, help="数据加载器工作进程数")

    # Distributed training arguments
    parser.add_argument("--ddp", action="store_true", help="使用DDP")
    parser.add_argument("--fsdp", action="store_true", help="使用FSDP")

    # WandB arguments
    parser.add_argument("--use_wandb", action="store_true", help="使用WandB")
    parser.add_argument("--wandb_project", type=str, default="minimind-pretrain", help="WandB项目名")

    args = parser.parse_args()

    # Select the device (DDP binds each process to its local GPU)
    if torch.cuda.is_available():
        if args.ddp:
            local_rank = init_distributed_mode()
            args.device = torch.device(f"cuda:{local_rank}")
        else:
            args.device = torch.device("cuda:0")
    else:
        args.device = torch.device("cpu")

    # Create the output directory
    os.makedirs(args.output_dir, exist_ok=True)

    # Seed RNGs (per-rank offset under DDP so shuffling/dropout differ)
    base_seed = 1337
    torch.manual_seed(base_seed)
    torch.cuda.manual_seed(base_seed)

    # Check whether a process group is up
    is_distributed = dist.is_initialized()

    if args.ddp and is_distributed:
        rank = dist.get_rank()
        torch.manual_seed(base_seed + rank)
        torch.cuda.manual_seed(base_seed + rank)

    # Initialize WandB on the main process only
    wandb = None
    if args.use_wandb and (not args.ddp or not is_distributed or dist.get_rank() == 0):
        import wandb
        wandb.init(project=args.wandb_project, name=f"pretrain-fixed-{args.config}")

    # Resolve the model configuration
    config_map = {
        "minimind2-small": get_minimind2_small_config,
        "minimind2": get_minimind2_config,
        "minimind2-moe": get_minimind2_moe_config
    }
    config = config_map[args.config]()

    # Initialize model and tokenizer
    model, tokenizer = init_model(config, args)

    # Build the dataset
    train_ds = SimplePretrainDataset(
        data_path=args.data_path,
        tokenizer=tokenizer,
        max_length=args.max_seq_len
    )

    # Build the data loader.
    # Bug fix: the original passed shuffle=False unconditionally, so
    # single-process training never shuffled the data. DataLoader forbids
    # shuffle together with a sampler, so shuffle only when no sampler.
    train_sampler = DistributedSampler(train_ds) if args.ddp else None
    train_loader = DataLoader(
        train_ds,
        batch_size=args.batch_size,
        pin_memory=True,
        drop_last=False,
        shuffle=(train_sampler is None),
        num_workers=args.num_workers,
        sampler=train_sampler
    )

    # AdamW with weight decay and LLM-style betas
    optimizer = optim.AdamW(
        model.parameters(),
        lr=args.learning_rate,
        weight_decay=0.01,
        betas=(0.9, 0.95)
    )

    # Gradient scaler.
    # Bug fix: loss scaling is only needed (and only correct) for float16;
    # bfloat16 has the full float32 exponent range and must not be scaled.
    scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype == 'float16'))

    # Wrap for distributed training
    if args.ddp:
        model._ddp_params_and_buffers_to_ignore = {"pos_cis"}
        model = DistributedDataParallel(model, device_ids=[args.device.index])
    elif args.fsdp:
        model = setup_fsdp(model, args)

    # Training loop
    Logger(f"开始修复的预训练，共{args.epochs}个epoch")
    for epoch in range(args.epochs):
        if args.ddp:
            # Re-seed the sampler so each epoch sees a different shard order
            train_sampler.set_epoch(epoch)
        train_epoch(model, optimizer, scaler, train_loader, epoch, args, config, tokenizer, wandb)

    # Save the final model (main process only)
    if not args.ddp or not dist.is_initialized() or dist.get_rank() == 0:
        final_model_path = os.path.join(args.output_dir, "final_model")
        os.makedirs(final_model_path, exist_ok=True)

        # Unwrap DDP before grabbing the state dict
        if isinstance(model, DistributedDataParallel):
            state_dict = model.module.state_dict()
        else:
            state_dict = model.state_dict()

        torch.save(state_dict, os.path.join(final_model_path, "pytorch_model.bin"))

        # Config + training args (device-like objects stringified for JSON)
        config_dict = {
            "model_type": "llm",
            "config": {
                k: v for k, v in config.__dict__.items()
                if not k.startswith('_') and not callable(v)
            },
            "training_args": {
                k: str(v) if hasattr(v, '__class__') and 'device' in str(type(v)) else v
                for k, v in vars(args).items()
            }
        }
        with open(os.path.join(final_model_path, "config.json"), "w") as f:
            json.dump(config_dict, f, indent=2)

        # Tokenizer
        tokenizer.save_pretrained(final_model_path)

        Logger(f"最终模型已保存到: {final_model_path}")

    if wandb:
        wandb.finish()

if __name__ == "__main__":
    main()
