#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
知识蒸馏训练脚本

教师模型: CodeT5-base (220M 参数)
学生模型: CodeT5-small (60M 参数)

核心思想:
1. 教师生成软标签 (logits)
2. 学生同时学习硬标签 (真实代码) 和软标签
3. 最终学生模型更小但性能接近教师

使用方法:
    python distillation_train.py --temperature 2.0 --alpha 0.7
"""

import os
import json
import torch
import torch.nn.functional as F
import inspect
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    Trainer,
    TrainingArguments,
    DataCollatorForSeq2Seq,
)
from transformers.trainer_utils import get_last_checkpoint
from peft import LoraConfig, get_peft_model
from datasets import Dataset
import argparse

os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")
# 避免分词器在多进程环境下的并行警告
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")

# ===================== #
# Knowledge distillation configuration
# ===================== #
# Prefer local model copies to avoid network downloads.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
LOCAL_TEACHER_MODEL = os.path.join(BASE_DIR, "models", "Salesforce_codet5-base")
LOCAL_STUDENT_MODEL = os.path.join(BASE_DIR, "models", "Salesforce_codet5-small")

# Report whether the local model directories exist (debug output).
print(f"[DEBUG] 基础目录: {BASE_DIR}")
print(f"[DEBUG] 查找教师模型: {LOCAL_TEACHER_MODEL}")
print(f"[DEBUG] 教师模型存在: {os.path.exists(LOCAL_TEACHER_MODEL)}")
print(f"[DEBUG] 查找学生模型: {LOCAL_STUDENT_MODEL}")
print(f"[DEBUG] 学生模型存在: {os.path.exists(LOCAL_STUDENT_MODEL)}")

# Verify that a local model directory is complete (config + weights).
def check_local_model(model_path):
    """Return True if *model_path* contains a usable local model.

    A usable model needs config.json, tokenizer_config.json and one
    weight file (pytorch_model.bin or model.safetensors).
    """
    if not os.path.exists(model_path):
        return False
    # Both config files are mandatory.
    for required in ("config.json", "tokenizer_config.json"):
        if not os.path.exists(os.path.join(model_path, required)):
            print(f"[WARN] 缺少 {required} 在 {model_path}")
            return False
    # Weights may be stored as either .bin or .safetensors.
    weight_names = ("pytorch_model.bin", "model.safetensors")
    has_weights = any(
        os.path.exists(os.path.join(model_path, name)) for name in weight_names
    )
    if not has_weights:
        print(f"[WARN] 未找到模型权重文件在 {model_path}")
    return has_weights

# Pick each model path: use the local copy when complete, otherwise fall
# back to downloading from HuggingFace (via HF_ENDPOINT mirror set above).
if check_local_model(LOCAL_TEACHER_MODEL):
    TEACHER_MODEL = LOCAL_TEACHER_MODEL
    print(f"[INFO] 使用本地教师模型: {TEACHER_MODEL}")
else:
    TEACHER_MODEL = "Salesforce/codet5-base"
    print(f"[INFO] 本地教师模型未找到，将从 HuggingFace 下载")

if check_local_model(LOCAL_STUDENT_MODEL):
    STUDENT_MODEL = LOCAL_STUDENT_MODEL
    print(f"[INFO] 使用本地学生模型: {STUDENT_MODEL}")
else:
    STUDENT_MODEL = "Salesforce/codet5-small"
    print(f"[INFO] 本地学生模型未找到，将从 HuggingFace 下载")

TEMPERATURE = 2.0      # distillation temperature (softmax smoothing); CLI --temperature overrides
ALPHA = 0.7           # distillation loss weight (0.7 * KL + 0.3 * CE); CLI --alpha overrides

DATA_DIR = os.path.join(BASE_DIR, "data", "processed")
OUTPUT_DIR = os.path.join(BASE_DIR, "model", "distilled_student")

# Checkpoint-resume policy, controlled via the T2C_RESUME env var.
RESUME_POLICY = os.getenv("T2C_RESUME", "auto").lower()  # auto|always|never

# Performance tuning
BATCH_SIZE = 12       # sized for running teacher + student together on an RTX 5090
GRAD_ACCUM = 1        # large batches need no gradient accumulation
NUM_WORKERS = 8       # parallel data-loading workers
PREFETCH_FACTOR = 4   # batches prefetched per worker

# ===================== #
# 自定义蒸馏 Trainer
# ===================== #
class DistillationTrainer(Trainer):
    """Trainer that adds a knowledge-distillation term to the loss.

    total_loss = alpha * KL(student || teacher) * T^2 + (1 - alpha) * CE

    The teacher is frozen and only supplies soft targets; the student
    (the ``model`` managed by the base Trainer) is the one optimized.
    """

    def __init__(self, teacher_model, temperature=2.0, alpha=0.7, *args, **kwargs):
        """
        Args:
            teacher_model: Frozen seq2seq model providing soft labels.
            temperature: Softmax temperature T; larger values smooth the
                teacher distribution.
            alpha: Weight of the distillation (KL) term; (1 - alpha)
                weights the standard cross-entropy term.
        """
        super().__init__(*args, **kwargs)
        self.teacher = teacher_model
        self.teacher.eval()  # teacher stays frozen in eval mode
        self.temperature = temperature
        self.alpha = alpha

        # Keep the teacher on the same device as training.
        if torch.cuda.is_available():
            self.teacher = self.teacher.cuda()

    # `num_items_in_batch` keeps compatibility with newer transformers
    # versions that pass this extra argument to compute_loss.
    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        """Combined loss: alpha * distillation KL + (1 - alpha) * CE.

        Fix: positions whose label is -100 (padding) are now excluded
        from the KL term, matching how the model's internal cross-entropy
        already ignores them. Previously padded positions were counted in
        both the KL sum and the normalizer, skewing the distillation loss.
        """
        # Student forward pass; labels in `inputs` make the model compute CE.
        student_outputs = model(**inputs)
        student_logits = student_outputs.logits  # [batch, seq_len, vocab_size]
        student_loss = student_outputs.loss      # standard cross-entropy

        # Teacher forward pass without gradients. Labels are kept so
        # teacher forcing aligns teacher logits position-wise with the
        # student's; the teacher's own loss is ignored.
        with torch.no_grad():
            teacher_outputs = self.teacher(**inputs)
            teacher_logits = teacher_outputs.logits

        # Temperature-smoothed distributions.
        T = self.temperature
        student_log_probs = F.log_softmax(student_logits / T, dim=-1)
        teacher_probs = F.softmax(teacher_logits / T, dim=-1)

        # Per-token KL divergence, summed over the vocab dimension.
        kl_per_token = F.kl_div(
            student_log_probs,
            teacher_probs,
            reduction='none',
        ).sum(dim=-1)  # [batch, seq_len]

        labels = inputs.get("labels") if hasattr(inputs, "get") else None
        if labels is not None:
            # Average over real target tokens only; -100 marks padding.
            valid = (labels != -100).to(kl_per_token.dtype)
            num_valid = valid.sum().clamp(min=1.0)  # guard against all-pad batches
            distillation_loss = (kl_per_token * valid).sum() / num_valid
        else:
            # No labels available: fall back to averaging every position.
            distillation_loss = kl_per_token.mean()
        # T^2 restores gradient magnitude after temperature scaling.
        distillation_loss = distillation_loss * (T ** 2)

        # Weighted sum of distillation and hard-label losses.
        total_loss = self.alpha * distillation_loss + (1 - self.alpha) * student_loss

        return (total_loss, student_outputs) if return_outputs else total_loss


# ===================== #
# 数据加载函数
# ===================== #
def read_jsonl(path):
    """Read a JSON-Lines file, skipping blank and malformed lines.

    Args:
        path: Path to a UTF-8 encoded .jsonl file.

    Returns:
        List of decoded objects, one per valid line.
    """
    data = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                data.append(json.loads(line))
            except json.JSONDecodeError:
                # Skip malformed lines but keep reading. The original bare
                # `except:` also swallowed unrelated errors (even
                # KeyboardInterrupt); this narrows it to decode failures.
                continue
    return data


def prepare_dataset(data, tokenizer, max_length=224):
    """Tokenize instruction/output pairs and build a HuggingFace Dataset.

    Inputs come from each record's "instruction" field and labels from its
    "output" field; padding is deferred to the data collator.
    """
    sources = [example["instruction"] for example in data]
    targets = [example["output"] for example in data]

    encoded = tokenizer(
        sources,
        max_length=max_length,
        truncation=True,
        padding=False,
    )
    target_encoding = tokenizer(
        targets,
        max_length=max_length,
        truncation=True,
        padding=False,
    )

    encoded["labels"] = target_encoding["input_ids"]
    return Dataset.from_dict(encoded)


# ===================== #
# 训练收敛可视化与日志保存
# ===================== #
def _save_training_convergence(trainer, out_dir):
    """保存训练日志和收敛曲线（与 train_text2code.py 一致）"""
    os.makedirs(out_dir, exist_ok=True)
    history = list(getattr(trainer.state, "log_history", []))
    if not history:
        print("[WARN] 未获取到训练日志历史，跳过收敛曲线绘制")
        return

    # 提取 loss 随 step 的变化
    train_steps, train_losses = [], []
    eval_steps, eval_losses = [], []
    for rec in history:
        step = rec.get("step")
        if step is None:
            continue
        if "loss" in rec:
            train_steps.append(step)
            train_losses.append(rec["loss"])
        if "eval_loss" in rec:
            eval_steps.append(step)
            eval_losses.append(rec["eval_loss"])

    # 保存 CSV，便于外部分析
    csv_path = os.path.join(out_dir, "training_log.csv")
    try:
        import csv
        with open(csv_path, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(["type", "step", "loss"])
            for s, l in zip(train_steps, train_losses):
                writer.writerow(["train", s, l])
            for s, l in zip(eval_steps, eval_losses):
                writer.writerow(["eval", s, l])
        print(f"[SAVED] 训练日志CSV: {csv_path}")
    except Exception as e:
        print(f"[WARN] 保存训练日志CSV失败: {e}")

    # 绘制并保存收敛曲线
    try:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(8, 5), dpi=120)
        if train_steps:
            plt.plot(train_steps, train_losses, label="train loss", color="#1f77b4", linewidth=1.5)
        if eval_steps:
            plt.plot(eval_steps, eval_losses, label="eval loss", color="#ff7f0e", linestyle="--", linewidth=1.5)
        plt.xlabel("step")
        plt.ylabel("loss")
        plt.title("Knowledge Distillation Training Convergence")
        plt.grid(True, alpha=0.3)
        plt.legend()
        png_path = os.path.join(out_dir, "training_convergence.png")
        plt.tight_layout()
        plt.savefig(png_path)
        plt.close()
        print(f"[SAVED] 收敛曲线图: {png_path}")
    except ImportError:
        print("[WARN] 未安装 matplotlib，已跳过曲线绘制。可执行 'pip install matplotlib' 后重训或根据 CSV 自行绘制。")
    except Exception as e:
        print(f"[WARN] 绘制收敛曲线失败: {e}")


# ===================== #
# 检查点与 LoRA 配置兼容性检查
# ===================== #
def _is_checkpoint_compatible(ckpt_dir):
    """检查检查点是否与当前 LoRA 配置兼容"""
    try:
        cfg_path = os.path.join(ckpt_dir, "adapter_config.json")
        with open(cfg_path, "r", encoding="utf-8") as f:
            cfg = json.load(f)
        # 检查 LoRA r 和 target_modules 是否匹配
        cr = int(cfg.get("r")) if cfg.get("r") is not None else None
        tm = cfg.get("target_modules")
        tm = [str(x) for x in (tm or [])]
        return (cr == 8) and (sorted(tm) == sorted(["q", "v"]))
    except Exception:
        return False


# ===================== #
# 主训练流程
# ===================== #
def main(args):
    """Run the full knowledge-distillation training pipeline.

    Steps: load tokenizer and JSONL data, load the frozen teacher and the
    LoRA-wrapped student, build version-compatible TrainingArguments,
    train (optionally resuming from a compatible checkpoint per
    RESUME_POLICY), then save the adapter, logs, and run configuration.

    Args:
        args: Parsed CLI namespace with temperature, alpha, epochs,
            batch_size, lr, and sample_size.
    """
    print("=" * 50)
    print("   知识蒸馏训练（已优化）")
    print("=" * 50)
    print(f"教师模型: {TEACHER_MODEL}")
    print(f"学生模型: {STUDENT_MODEL}")
    print(f"蒸馏温度: {args.temperature}")
    print(f"蒸馏权重 (alpha): {args.alpha}")
    print(f"批次大小: {args.batch_size}")
    print(f"数据加载线程数: {NUM_WORKERS}")
    print("")

    # The student's tokenizer is used for everything. The distillation loss
    # compares teacher/student logits token-by-token, which assumes both
    # models share the same vocabulary — TODO confirm for other model pairs.
    tokenizer = AutoTokenizer.from_pretrained(STUDENT_MODEL)
    
    train_path = os.path.join(DATA_DIR, "complex_train_text2code.jsonl")
    valid_path = os.path.join(DATA_DIR, "complex_valid_text2code.jsonl")
    
    train_data = read_jsonl(train_path)
    valid_data = read_jsonl(valid_path)
    
    # Optional down-sampling of the training set for quick experiments;
    # the fixed seed keeps the sampled subset reproducible across runs.
    if args.sample_size > 0:
        import random
        random.seed(42)
        train_data = random.sample(train_data, min(args.sample_size, len(train_data)))
    
    print(f"训练样本数: {len(train_data)}")
    print(f"验证样本数: {len(valid_data)}")
    
    train_dataset = prepare_dataset(train_data, tokenizer)
    valid_dataset = prepare_dataset(valid_data, tokenizer)
    
    print("\n正在加载教师模型...")
    # Freeze the teacher completely: eval mode plus requires_grad=False,
    # so it only supplies soft labels during distillation.
    teacher_model = AutoModelForSeq2SeqLM.from_pretrained(TEACHER_MODEL)
    teacher_model.eval()
    for param in teacher_model.parameters():
        param.requires_grad = False

    print("正在加载学生模型（使用 LoRA）...")
    student_model = AutoModelForSeq2SeqLM.from_pretrained(STUDENT_MODEL)

    # LoRA adapter on the attention q/v projections; only the low-rank
    # matrices are trainable. NOTE: _is_checkpoint_compatible() checks for
    # these same r=8 / ["q", "v"] values — keep them in sync.
    lora_config = LoraConfig(
        r=8,
        lora_alpha=32,
        target_modules=["q", "v"],
        lora_dropout=0.1,
        bias="none",
        task_type="SEQ_2_SEQ_LM"
    )
    student_model = get_peft_model(student_model, lora_config)

    # Parameter counts for the compression-ratio report below.
    teacher_params = sum(p.numel() for p in teacher_model.parameters())
    student_total = sum(p.numel() for p in student_model.parameters())
    student_trainable = sum(p.numel() for p in student_model.parameters() if p.requires_grad)
    
    print(f"\n[参数对比]")
    print(f"教师参数总量: {teacher_params:,} ({teacher_params/1e6:.1f}M)")
    print(f"学生参数总量: {student_total:,} ({student_total/1e6:.1f}M)")
    print(f"学生可训练参数: {student_trainable:,} ({student_trainable/1e6:.1f}M)")
    print(f"压缩比: {teacher_params / student_total:.2f}x")
    print("")

    # Build TrainingArguments defensively: inspect the installed
    # transformers version's __init__ signature and only pass kwargs it
    # actually supports, so the script runs across library versions.
    _sig = inspect.signature(TrainingArguments.__init__)
    _params = _sig.parameters
    ta_kwargs = dict(
        output_dir=OUTPUT_DIR,
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        gradient_accumulation_steps=GRAD_ACCUM,
        learning_rate=args.lr,
        warmup_ratio=0.1,
        logging_steps=100,
        save_steps=1000,
        eval_steps=1000,
        save_total_limit=2,
        fp16=torch.cuda.is_available(),
        report_to="none",
        dataloader_num_workers=NUM_WORKERS,
        max_grad_norm=1.0,
    )

    # The evaluation-strategy kwarg was renamed across transformers
    # versions; pick whichever spelling this version accepts.
    if "evaluation_strategy" in _params:
        ta_kwargs["evaluation_strategy"] = "steps"
    elif "eval_strategy" in _params:
        ta_kwargs["eval_strategy"] = "steps"
    elif "evaluate_during_training" in _params:
        ta_kwargs["evaluate_during_training"] = True

    # DataLoader tuning knobs, only on versions that support them.
    if "dataloader_prefetch_factor" in _params and NUM_WORKERS > 0:
        ta_kwargs["dataloader_prefetch_factor"] = PREFETCH_FACTOR
    if "dataloader_persistent_workers" in _params and NUM_WORKERS > 0:
        ta_kwargs["dataloader_persistent_workers"] = True
    if "dataloader_pin_memory" in _params:
        ta_kwargs["dataloader_pin_memory"] = True if torch.cuda.is_available() else False

    # Track the best checkpoint by eval_loss (lower is better).
    if "load_best_model_at_end" in _params:
        ta_kwargs["load_best_model_at_end"] = True
    if "metric_for_best_model" in _params:
        ta_kwargs["metric_for_best_model"] = "eval_loss"
    if "greater_is_better" in _params:
        ta_kwargs["greater_is_better"] = False

    # Fused AdamW on GPU when available; plain torch AdamW otherwise.
    if "optim" in _params:
        ta_kwargs["optim"] = "adamw_torch_fused" if torch.cuda.is_available() else "adamw_torch"

    training_args = TrainingArguments(**ta_kwargs)
    
    print("已启用性能优化：")
    fused = getattr(training_args, "optim", "") == "adamw_torch_fused"
    print(f"  - 融合 AdamW（Fused AdamW）: {fused}")
    print(f"  - FP16 混合精度: {torch.cuda.is_available()}")
    print(f"  - DataLoader 工作线程: {NUM_WORKERS}")
    if "dataloader_prefetch_factor" in _params:
        print(f"  - 预取因子: {PREFETCH_FACTOR}")
    print("")

    # Dynamic padding per batch; also pads labels for seq2seq training.
    data_collator = DataCollatorForSeq2Seq(tokenizer, model=student_model)
    
    trainer = DistillationTrainer(
        teacher_model=teacher_model,
        temperature=args.temperature,
        alpha=args.alpha,
        model=student_model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=valid_dataset,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    print("开始蒸馏训练...")

    # Resume handling: find the newest checkpoint in OUTPUT_DIR, then apply
    # RESUME_POLICY — "never" ignores it, "always" resumes unconditionally,
    # and "auto" resumes only if the LoRA config matches the current run.
    last_checkpoint = None
    if os.path.isdir(OUTPUT_DIR):
        last_checkpoint = get_last_checkpoint(OUTPUT_DIR)
    
    resume_path = None
    if last_checkpoint:
        if RESUME_POLICY == "never":
            print(f"[INFO] 发现检查点 {last_checkpoint}，但 RESUME_POLICY=never，开始新的训练")
            resume_path = None
        elif RESUME_POLICY == "always":
            print(f"[INFO] 从检查点恢复训练: {last_checkpoint}")
            resume_path = last_checkpoint
        else: 
            if _is_checkpoint_compatible(last_checkpoint):
                print(f"[INFO] 从兼容的检查点恢复: {last_checkpoint}")
                resume_path = last_checkpoint
            else:
                print(f"[WARN] 检查点 {last_checkpoint} 与当前配置不兼容，开始新的训练")
                resume_path = None
    
    if resume_path:
        trainer.train(resume_from_checkpoint=resume_path)
    else:
        trainer.train()
    
    # Save the loss history CSV and convergence plot alongside the model.
    _save_training_convergence(trainer, OUTPUT_DIR)
    
    # Saves the LoRA adapter (PEFT-wrapped model) plus the tokenizer files.
    student_model.save_pretrained(OUTPUT_DIR)
    tokenizer.save_pretrained(OUTPUT_DIR)
    print(f"\n[SUCCESS] 学生模型已保存到: {OUTPUT_DIR}")
    
    # Record the distillation hyper-parameters for later reproducibility.
    config_info = {
        "teacher_model": TEACHER_MODEL,
        "student_model": STUDENT_MODEL,
        "temperature": args.temperature,
        "alpha": args.alpha,
        "teacher_params": teacher_params,
        "student_params": student_total,
        "compression_ratio": float(teacher_params / student_total),
        "batch_size": args.batch_size,
        "epochs": args.epochs,
    }
    
    with open(os.path.join(OUTPUT_DIR, "distillation_config.json"), "w") as f:
        json.dump(config_info, f, indent=2)
    
    print("\n蒸馏训练完成！")
    print(f"压缩比: {config_info['compression_ratio']:.2f}x")
    # NOTE(review): speedup here is just the parameter ratio — actual
    # inference speedup depends on hardware and batch size; verify.
    print(f"理论加速: ~{config_info['compression_ratio']:.1f}x")


if __name__ == "__main__":
    # CLI entry point: distillation hyper-parameters and sampling options.
    parser = argparse.ArgumentParser(description="知识蒸馏训练脚本")
    parser.add_argument("--temperature", type=float, default=2.0, help="蒸馏温度 (temperature)")
    parser.add_argument("--alpha", type=float, default=0.7, help="蒸馏损失权重 (alpha)")
    parser.add_argument("--epochs", type=int, default=3, help="训练轮数")
    parser.add_argument("--batch_size", type=int, default=BATCH_SIZE, help="批次大小")
    parser.add_argument("--lr", type=float, default=1e-4, help="学习率（为稳定性已降低）")
    parser.add_argument("--sample_size", type=int, default=0, help="采样数量（0=全部）")
    
    args = parser.parse_args()
    main(args)
