#!/usr/bin/env python3
"""
增强版推理蒸馏训练脚本 - 支持DDP、FSDP等
"""

import os
import sys
import argparse
import time
import math
import warnings
import torch
import torch.distributed as dist
from torch import optim, nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, DistributedSampler
from contextlib import nullcontext

# 添加当前目录到Python路径
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from simple_model import SimpleLLMForCausalLM
from hf_tokenizer import MiniMindTokenizer
from sft_dataset import SFTDataset
from model_config import get_minimind2_small_config, get_minimind2_config, get_minimind2_moe_config

warnings.filterwarnings('ignore')

def Logger(content, ddp=False):
    """Print a log message; under DDP only the rank-0 process prints."""
    is_main_process = (not ddp) or (not dist.is_initialized()) or dist.get_rank() == 0
    if is_main_process:
        print(content)

def get_lr(current_step, total_steps, lr):
    """Cosine learning-rate schedule with a constant floor of lr/10.

    Decays from lr/10 + lr at step 0 down to lr/10 at the final step.
    """
    progress = current_step / total_steps
    cosine_term = 1.0 + math.cos(math.pi * progress)
    return lr / 10 + 0.5 * lr * cosine_term

def reasoning_distillation_loss(student_logits, teacher_logits, labels, loss_mask, 
                               temperature=4.0, alpha=0.7, beta=0.3):
    """Compute the combined reasoning-distillation loss.

    Args:
        student_logits: (batch, seq, vocab) student predictions.
        teacher_logits: (batch, seq, vocab) frozen teacher predictions, or None
            to disable the distillation/consistency terms.
        labels: (batch, seq) target token ids for cross entropy.
        loss_mask: optional (batch, seq) 0/1 mask selecting supervised tokens.
        temperature: softmax temperature for the KD term.
        alpha / beta: weights for CE and KD; the consistency term gets
            1 - alpha - beta (callers must keep alpha + beta <= 1).

    Returns:
        (total_loss, ce_loss, distill_loss, reasoning_loss) tensors.
    """
    # 1. Cross-entropy loss, averaged over supervised tokens only.
    ce_loss = nn.functional.cross_entropy(
        student_logits.view(-1, student_logits.size(-1)),
        labels.view(-1),
        reduction='none',
    ).view(labels.size())
    if loss_mask is not None:
        ce_loss = (ce_loss * loss_mask).sum() / loss_mask.sum()
    else:
        ce_loss = ce_loss.mean()

    # 2. Standard KD loss (temperature-scaled KL divergence).
    if teacher_logits is not None:
        # Align vocabulary sizes (truncate the teacher's extra entries).
        if student_logits.size(-1) != teacher_logits.size(-1):
            teacher_logits = teacher_logits[..., :student_logits.size(-1)]

        student_log_probs = nn.functional.log_softmax(student_logits / temperature, dim=-1)
        teacher_probs = nn.functional.softmax(teacher_logits / temperature, dim=-1)

        # Bug fix: 'batchmean' on 3D logits divides only by the batch size and
        # averages padding positions into the loss. Compute per-token KL and
        # average over the same masked tokens as the CE term.
        token_kl = nn.functional.kl_div(
            student_log_probs, teacher_probs, reduction='none'
        ).sum(dim=-1)
        if loss_mask is not None:
            distill_loss = (token_kl * loss_mask).sum() / loss_mask.sum()
        else:
            distill_loss = token_kl.mean()
        # Standard T^2 correction so gradients keep their scale.
        distill_loss = distill_loss * (temperature ** 2)
    else:
        distill_loss = torch.tensor(0.0, device=student_logits.device)

    # 3. Reasoning consistency loss (simplified proxy: per-token mean logit).
    reasoning_loss = torch.tensor(0.0, device=student_logits.device)
    if teacher_logits is not None:
        student_attention = student_logits.mean(dim=-1)
        teacher_attention = teacher_logits.mean(dim=-1)
        # Bug fix: mask padding positions out of the consistency term too.
        sq_err = (student_attention - teacher_attention) ** 2
        if loss_mask is not None:
            reasoning_loss = (sq_err * loss_mask).sum() / loss_mask.sum()
        else:
            reasoning_loss = sq_err.mean()

    # 4. Weighted total.
    total_loss = alpha * ce_loss + beta * distill_loss + (1 - alpha - beta) * reasoning_loss

    return total_loss, ce_loss, distill_loss, reasoning_loss

def train_epoch(student_model, teacher_model, optimizer, scaler, train_loader, epoch, args, config, tokenizer, wandb=None):
    """Run one epoch of reasoning-distillation training.

    Args:
        student_model: trainable student (possibly DDP/FSDP-wrapped).
        teacher_model: frozen teacher providing soft targets, or None.
        optimizer: optimizer over the student's parameters.
        scaler: AMP gradient scaler.
        train_loader: yields dicts with 'input_ids', 'labels' and optional 'loss_mask'.
        epoch: 0-based epoch index.
        args: parsed CLI arguments.
        config, tokenizer: accepted for interface compatibility (not used here).
        wandb: optional wandb module for metric logging on the main process.
    """
    student_model.train()
    if teacher_model is not None:
        # Teacher is inference-only: eval mode and no gradients.
        teacher_model.eval()
        teacher_model.requires_grad_(False)
    
    start_time = time.time()
    iter_per_epoch = len(train_loader)
    
    # Bug fix: `ddp` used to be assigned only inside the log_interval branch
    # but read in the save_interval branch, which could raise NameError when
    # the two intervals were not aligned. Compute it once up front.
    ddp = int(os.environ.get("RANK", -1)) != -1
    is_main_process = not ddp or not dist.is_initialized() or dist.get_rank() == 0
    
    # Mixed-precision autocast context (no-op on CPU).
    device_type = "cuda" if "cuda" in str(args.device) else "cpu"
    ctx = nullcontext() if device_type == "cpu" else torch.cuda.amp.autocast()
    
    for step, batch in enumerate(train_loader):
        # Move batch tensors to the training device.
        input_ids = batch['input_ids'].to(args.device)
        labels = batch['labels'].to(args.device)
        loss_mask = batch.get('loss_mask', None)
        if loss_mask is not None:
            loss_mask = loss_mask.to(args.device)
        
        # Cosine LR schedule across the full run (all epochs).
        lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch, args.learning_rate)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        
        # Student forward pass under autocast.
        with ctx:
            student_outputs = student_model(input_ids=input_ids, labels=labels)
            student_logits = student_outputs['logits']
        
        # Teacher forward pass: gradients disabled, logits used as soft targets.
        teacher_logits = None
        if teacher_model is not None:
            with torch.no_grad():
                teacher_outputs = teacher_model(input_ids=input_ids, labels=labels)
                teacher_logits = teacher_outputs['logits']
        
        # Combined CE + distillation + reasoning-consistency loss.
        total_loss, ce_loss, distill_loss, reasoning_loss = reasoning_distillation_loss(
            student_logits, teacher_logits, labels, loss_mask,
            temperature=args.temperature, alpha=args.alpha, beta=args.beta
        )
        
        # Scale down for gradient accumulation.
        total_loss = total_loss / args.accumulation_steps
        
        # Backward with AMP loss scaling.
        scaler.scale(total_loss).backward()
        
        if (step + 1) % args.accumulation_steps == 0:
            # Unscale before clipping so the clip threshold applies to true grads.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(student_model.parameters(), args.grad_clip)
            
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad(set_to_none=True)
        
        # Periodic console / wandb logging.
        if step % args.log_interval == 0:
            spend_time = time.time() - start_time
            # Bug fix: the ETA used `//60` (floor division) on a float that was
            # then formatted with `.1f`, always printing `.0`; use true division.
            eta_min = spend_time / (step + 1) * iter_per_epoch / 60
            Logger(f'Epoch:[{epoch+1}/{args.epochs}]({step}/{iter_per_epoch}) '
                  f'total_loss:{total_loss.item()*args.accumulation_steps:.4f} '
                  f'ce_loss:{ce_loss.item():.4f} '
                  f'distill_loss:{distill_loss.item():.4f} '
                  f'reasoning_loss:{reasoning_loss.item():.4f} '
                  f'lr:{lr:.8f} '
                  f'time:{eta_min:.1f}min', ddp)
            
            if wandb and is_main_process:
                wandb.log({
                    "total_loss": total_loss.item() * args.accumulation_steps,
                    "ce_loss": ce_loss.item(),
                    "distill_loss": distill_loss.item(),
                    "reasoning_loss": reasoning_loss.item(),
                    "lr": lr,
                    "epoch": epoch,
                    "step": step
                })
        
        # Periodic checkpointing (main process only).
        if (step + 1) % args.save_interval == 0 and is_main_process:
            student_model.eval()
            checkpoint_dir = os.path.join(args.output_dir, "reason_distill_checkpoints")
            os.makedirs(checkpoint_dir, exist_ok=True)
            
            # Unwrap DDP so the saved state_dict has clean key names.
            if isinstance(student_model, DistributedDataParallel):
                state_dict = student_model.module.state_dict()
            else:
                state_dict = student_model.state_dict()
            
            checkpoint_path = os.path.join(checkpoint_dir, f"checkpoint-{step}.pth")
            torch.save({
                'model_state_dict': state_dict,
                'optimizer_state_dict': optimizer.state_dict(),
                'step': step,
                'epoch': epoch,
                'loss': total_loss.item(),
                'args': args
            }, checkpoint_path)
            
            Logger(f"检查点已保存到: {checkpoint_path}")
            student_model.train()

def init_teacher_model(config, args):
    """Build the frozen teacher model, loading pretrained weights when available.

    Falls back to random initialization if the checkpoint path or file is
    missing. Returns the model on args.device in eval mode with grads disabled.
    """
    # Fix: compute the DDP flag once and pass it to every Logger call so the
    # load-progress messages are not duplicated on every rank (previously only
    # the final parameter-count message was rank-guarded).
    ddp = int(os.environ.get("RANK", -1)) != -1
    model = SimpleLLMForCausalLM(config)
    
    if args.teacher_model and os.path.exists(args.teacher_model):
        Logger(f"加载教师模型从: {args.teacher_model}", ddp)
        # A directory is expected to contain a standard pytorch_model.bin.
        if os.path.isdir(args.teacher_model):
            model_path = os.path.join(args.teacher_model, "pytorch_model.bin")
        else:
            model_path = args.teacher_model
        
        if os.path.exists(model_path):
            state_dict = torch.load(model_path, map_location=args.device)
            # strict=False tolerates minor architecture mismatches.
            model.load_state_dict(state_dict, strict=False)
            Logger("教师模型加载成功!", ddp)
        else:
            Logger("教师模型文件不存在，使用随机初始化", ddp)
    else:
        Logger("教师模型路径不存在，使用随机初始化", ddp)
    
    # Teacher is inference-only: frozen parameters, eval mode.
    model = model.to(args.device)
    model.eval()
    model.requires_grad_(False)
    
    total_params = sum(p.numel() for p in model.parameters())
    Logger(f'教师模型参数量: {total_params / 1e6:.1f}M', ddp)
    
    return model

def init_student_model(config, args):
    """Build the trainable student model, loading initial weights when available.

    Falls back to random initialization if the checkpoint path or file is
    missing. Returns the model on args.device with gradients enabled.
    """
    # Fix: compute the DDP flag once and pass it to every Logger call so the
    # load-progress messages are not duplicated on every rank (previously only
    # the parameter-count messages were rank-guarded).
    ddp = int(os.environ.get("RANK", -1)) != -1
    model = SimpleLLMForCausalLM(config)
    
    if args.student_model and os.path.exists(args.student_model):
        Logger(f"加载学生模型从: {args.student_model}", ddp)
        # A directory is expected to contain a standard pytorch_model.bin.
        if os.path.isdir(args.student_model):
            model_path = os.path.join(args.student_model, "pytorch_model.bin")
        else:
            model_path = args.student_model
        
        if os.path.exists(model_path):
            state_dict = torch.load(model_path, map_location=args.device)
            # strict=False tolerates minor architecture mismatches.
            model.load_state_dict(state_dict, strict=False)
            Logger("学生模型加载成功!", ddp)
        else:
            Logger("学生模型路径不存在，使用随机初始化", ddp)
    else:
        Logger("学生模型路径不存在，使用随机初始化", ddp)
    
    model = model.to(args.device)
    
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    
    Logger(f'学生模型总参数量: {total_params / 1e6:.1f}M', ddp)
    Logger(f'学生模型可训练参数量: {trainable_params / 1e6:.1f}M', ddp)
    
    return model

def init_distributed_mode():
    """Initialize the NCCL process group and bind this process to its GPU.

    Reads LOCAL_RANK from the environment (set by torchrun/launch); assumes
    RANK/WORLD_SIZE/LOCAL_RANK are exported and CUDA is available.
    """
    dist.init_process_group(backend="nccl")
    local_rank = int(os.environ["LOCAL_RANK"])
    # Pin this process to its local GPU before any CUDA work happens.
    torch.cuda.set_device(local_rank)

def setup_fsdp(model, args):
    """Wrap `model` with FSDP if available; otherwise return it unchanged.

    Uses a transformer auto-wrap policy and full-bfloat16 mixed precision.
    Falls back (with a log message) when the FSDP modules cannot be imported.
    """
    try:
        from functools import partial

        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
        from torch.distributed.fsdp import MixedPrecision
        from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
        
        # Bug fix: transformer_auto_wrap_policy is a callable expecting
        # (module, recurse, nonwrapped_numel, transformer_layer_cls) — it must
        # be partially applied with the layer classes, not invoked directly,
        # otherwise FSDP setup raises a TypeError before training starts.
        auto_wrap_policy = partial(
            transformer_auto_wrap_policy,
            transformer_layer_cls={torch.nn.TransformerEncoderLayer},
        )
        
        # Run parameters, gradient reduction and buffers all in bfloat16.
        mixed_precision_policy = MixedPrecision(
            param_dtype=torch.bfloat16,
            reduce_dtype=torch.bfloat16,
            buffer_dtype=torch.bfloat16,
        )
        
        model = FSDP(
            model,
            auto_wrap_policy=auto_wrap_policy,
            mixed_precision=mixed_precision_policy,
            device_id=torch.cuda.current_device(),
        )
        
        Logger("FSDP已启用")
        return model
    except ImportError:
        Logger("FSDP不可用，使用标准DDP")
        return model

def main():
    """CLI entry point: parse args, build models and data, run distillation."""
    parser = argparse.ArgumentParser(description="增强版推理蒸馏训练")
    parser.add_argument("--output_dir", type=str, default="./reason_distill_output")
    parser.add_argument("--data_path", type=str, 
                       default="/Users/sd/Desktop/mycode/myalgo/milvus/minimind/my_llm_implementation/datasets/pretrain_hq.jsonl")
    parser.add_argument("--teacher_model", type=str, 
                       default="./sft_output/final_model",
                       help="教师模型路径")
    parser.add_argument("--student_model", type=str, 
                       default="./sft_output/final_model",
                       help="学生模型路径")
    parser.add_argument("--teacher_config", type=str, default="minimind2", 
                       choices=["minimind2-small", "minimind2", "minimind2-moe"])
    parser.add_argument("--student_config", type=str, default="minimind2-small", 
                       choices=["minimind2-small", "minimind2", "minimind2-moe"])
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--batch_size", type=int, default=2)
    parser.add_argument("--learning_rate", type=float, default=5e-6)
    parser.add_argument("--temperature", type=float, default=4.0, help="蒸馏温度")
    parser.add_argument("--alpha", type=float, default=0.5, help="CE损失权重")
    parser.add_argument("--beta", type=float, default=0.3, help="蒸馏损失权重")
    parser.add_argument("--device", type=str, default="cuda:0" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--dtype", type=str, default="bfloat16")
    parser.add_argument("--use_wandb", action="store_true")
    parser.add_argument("--wandb_project", type=str, default="MiniMind-Reason-Distill-Advanced")
    parser.add_argument("--num_workers", type=int, default=1)
    parser.add_argument("--ddp", action="store_true")
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--accumulation_steps", type=int, default=1)
    parser.add_argument("--grad_clip", type=float, default=1.0)
    parser.add_argument("--warmup_iters", type=int, default=0)
    parser.add_argument("--log_interval", type=int, default=100)
    parser.add_argument("--save_interval", type=int, default=100)
    parser.add_argument("--max_seq_len", type=int, default=512)
    parser.add_argument("--local_rank", type=int, default=-1)
    
    args = parser.parse_args()
    
    # Set up the device; under torchrun each rank binds to its local GPU.
    if args.ddp and int(os.environ.get("RANK", -1)) != -1:
        init_distributed_mode()
        local_rank = int(os.environ["LOCAL_RANK"])
        args.device = f"cuda:{local_rank}"
    
    args.device = torch.device(args.device)
    
    os.makedirs(args.output_dir, exist_ok=True)
    
    # Deterministic seeding, offset per rank under DDP so ranks differ.
    base_seed = 1337
    torch.manual_seed(base_seed)
    torch.cuda.manual_seed(base_seed)
    
    if args.ddp and dist.is_initialized():
        rank = dist.get_rank()
        torch.manual_seed(base_seed + rank)
        torch.cuda.manual_seed(base_seed + rank)
    
    # Initialize WandB on the main process only.
    # Bug fix: guard dist.get_rank() with dist.is_initialized() so that
    # --use_wandb --ddp does not crash when no process group was created.
    wandb = None
    if args.use_wandb and (not args.ddp or not dist.is_initialized() or dist.get_rank() == 0):
        import wandb
        wandb.init(project=args.wandb_project, name=f"reason-distill-{args.student_config}")
    
    # Resolve model configurations by name.
    config_map = {
        "minimind2-small": get_minimind2_small_config,
        "minimind2": get_minimind2_config,
        "minimind2-moe": get_minimind2_moe_config
    }
    teacher_config = config_map[args.teacher_config]()
    student_config = config_map[args.student_config]()
    
    # Initialize teacher (frozen) and student (trainable) models.
    teacher_model = init_teacher_model(teacher_config, args)
    student_model = init_student_model(student_config, args)
    
    # Build the tokenizer once and reuse it for the dataset and train_epoch.
    tokenizer = MiniMindTokenizer()
    
    train_ds = SFTDataset(
        data_path=args.data_path,
        tokenizer=tokenizer,
        max_length=args.max_seq_len
    )
    
    # Bug fix: shuffle in the non-DDP case (the original hard-coded
    # shuffle=False, so data order never varied). DistributedSampler shuffles
    # by itself, and DataLoader forbids shuffle=True together with a sampler.
    train_sampler = DistributedSampler(train_ds) if args.ddp else None
    train_loader = DataLoader(
        train_ds,
        batch_size=args.batch_size,
        pin_memory=True,
        drop_last=False,
        shuffle=(train_sampler is None),
        num_workers=args.num_workers,
        sampler=train_sampler
    )
    
    optimizer = optim.AdamW(student_model.parameters(), lr=args.learning_rate)
    
    # NOTE(review): GradScaler is only strictly needed for float16; enabling it
    # for bfloat16 is harmless, so the original behavior is preserved.
    scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype in ['float16', 'bfloat16']))
    
    # Wrap the student for distributed training.
    if args.ddp:
        student_model._ddp_params_and_buffers_to_ignore = {"pos_cis"}
        student_model = DistributedDataParallel(student_model, device_ids=[args.device.index])
    elif args.fsdp:
        student_model = setup_fsdp(student_model, args)
    
    Logger(f"开始推理蒸馏训练，共{args.epochs}个epoch")
    for epoch in range(args.epochs):
        if args.ddp:
            train_sampler.set_epoch(epoch)
        # Bug fix: the original passed the undefined names `config` and
        # `tokenizer` here (NameError on the first epoch); pass the student
        # config and the tokenizer constructed above instead.
        train_epoch(student_model, teacher_model, optimizer, scaler, train_loader,
                    epoch, args, student_config, tokenizer, wandb)
    
    # Save the final model on the main process only.
    if not args.ddp or not dist.is_initialized() or dist.get_rank() == 0:
        final_model_path = os.path.join(args.output_dir, "final_model")
        os.makedirs(final_model_path, exist_ok=True)
        
        # Unwrap DDP so the saved state_dict has clean key names.
        if isinstance(student_model, DistributedDataParallel):
            state_dict = student_model.module.state_dict()
        else:
            state_dict = student_model.state_dict()
        
        torch.save(state_dict, os.path.join(final_model_path, "pytorch_model.bin"))
        
        # Persist the model/training configuration alongside the weights.
        import json
        config_dict = {
            "model_type": "llm",
            "config": student_config.__dict__,
            "training_args": vars(args)
        }
        with open(os.path.join(final_model_path, "config.json"), "w") as f:
            json.dump(config_dict, f, indent=2)
        
        Logger(f"最终模型已保存到: {final_model_path}")
    
    if wandb:
        wandb.finish()

# Script entry point: run training only when executed directly (not on import).
if __name__ == "__main__":
    main()
