#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Text2Code LoRA 微调脚本

功能:
- 基于 CodeT5-base 进行 LoRA 微调
- 支持 Complex/Simple 任务分离训练
- 集成 MBPP 数据集
- 支持断点恢复、自定义停止条件
- RTX 5090 性能优化

环境变量:
  T2C_TRAIN_KIND: 训练类型 (complex|simple|both)
  T2C_EPOCHS: 训练轮数 (默认 4)
  T2C_RESUME: 恢复策略 (auto|always|never)
  
使用示例:
  python train_text2code.py
  T2C_TRAIN_KIND=complex python train_text2code.py
"""

import os

# Configure the Hugging Face mirror (must happen BEFORE importing transformers,
# since the hub endpoint is read at import time).
os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")
os.environ.setdefault("HF_HUB_BASE_URL", os.environ["HF_ENDPOINT"])
os.environ.setdefault("HUGGINGFACE_HUB_BASE_URL", os.environ["HF_ENDPOINT"])

import json
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    Trainer,
    TrainingArguments,
    DataCollatorForSeq2Seq,
    TrainerCallback,
)
from transformers.trainer_utils import get_last_checkpoint
from peft import LoraConfig, get_peft_model
from datasets import Dataset
import inspect

# ===================== #
# Configuration: paths and base model
# ===================== #
BASE_MODEL_ID = os.getenv("T2C_BASE_MODEL", "Salesforce/codet5-base")
# Local cache directory derived from the model ID ('/' is not filesystem-safe).
LOCAL_MODEL_PATH = f"./models/{BASE_MODEL_ID.replace('/', '_')}"

def _pick_model_name():
    """Resolve the model source, preferring the local cache directory.

    Returns:
        (name_or_path, local_only): the path/ID to load from, and whether
        loading should be restricted to local files.
    """
    if os.path.exists(LOCAL_MODEL_PATH):
        return LOCAL_MODEL_PATH, True
    # No local copy: load online through the configured mirror.
    return BASE_MODEL_ID, False

# MODEL_NAME resolves to the local cache when present; _LOCAL_ONLY mirrors that choice.
MODEL_NAME, _LOCAL_ONLY = _pick_model_name()
DATA_DIR = "data/processed"
MBPP_DIR = "data/processed_mbpp"

# One base output dir plus per-task variants for the complex/simple LoRA runs.
OUTPUT_DIR = os.getenv("T2C_OUTPUT_DIR", "model/text2code_lora")
OUTPUT_DIR_COMPLEX = OUTPUT_DIR + "_complex"
OUTPUT_DIR_SIMPLE = OUTPUT_DIR + "_simple"
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(OUTPUT_DIR_COMPLEX, exist_ok=True)
os.makedirs(OUTPUT_DIR_SIMPLE, exist_ok=True)

# ===================== #
# 环境变量配置 (训练速度相关)
# ===================== #

def _env_flag(name: str, default: bool = False) -> bool:
    """解析布尔型环境变量"""
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "y", "on"}

def _env_list(name: str, default: str) -> list[str]:
    """解析列表型环境变量"""
    v = os.getenv(name, default)
    return [x.strip() for x in v.split(",") if x.strip()]

# Batch size and gradient accumulation — tuned for RTX 5090 32 GB (~2x throughput)
TRAIN_BSZ = int(os.getenv("T2C_TRAIN_BSZ", "16"))  # 8->16, uses the larger VRAM
EVAL_BSZ = int(os.getenv("T2C_EVAL_BSZ", str(TRAIN_BSZ)))
GRAD_ACCUM = int(os.getenv("T2C_GRAD_ACCUM", "1"))  # 2->1, less accumulation latency

# Sequence length limits (tokens)
MAX_SOURCE_LEN = int(os.getenv("T2C_MAX_SRC", "512"))
MAX_TARGET_LEN = int(os.getenv("T2C_MAX_TGT", "512"))

# Mixed-precision training
MP_AUTO = _env_flag("T2C_MP_AUTO", True)  # auto-select: bf16 on Ampere+ (cc >= 8), else fp16
MP_FP16 = _env_flag("T2C_FP16", True)
MP_BF16 = _env_flag("T2C_BF16", False)

# LoRA configuration
LORA_R = int(os.getenv("T2C_LORA_R", "8"))
LORA_ALPHA = int(os.getenv("T2C_LORA_ALPHA", "32"))
LORA_DROPOUT = float(os.getenv("T2C_LORA_DROPOUT", "0.1"))
LORA_TARGETS = _env_list("T2C_LORA_TARGETS", "q,v")

# Training cadence (learning rate, logging/saving/eval step intervals)
LEARNING_RATE = float(os.getenv("T2C_LR", "2e-4"))
LOG_STEPS = int(os.getenv("T2C_LOG_STEPS", "200"))
SAVE_STEPS = int(os.getenv("T2C_SAVE_STEPS", "1500"))
EVAL_STEPS = int(os.getenv("T2C_EVAL_STEPS", "1500"))
SAVE_TOTAL_LIMIT = int(os.getenv("T2C_SAVE_TOTAL", "2"))

# Optimizer settings (fused optimizer, ~15% faster per the author's notes)
OPTIMIZER = os.getenv("T2C_OPTIM", "adamw_torch_fused")
LR_SCHEDULER = os.getenv("T2C_LR_SCHEDULER", "linear")
MAX_GRAD_NORM = float(os.getenv("T2C_MAX_GRAD_NORM", "1.0"))

# Performance knobs (tuned for 25 vCPU + 90 GB RAM)
NUM_WORKERS = int(os.getenv("T2C_NUM_WORKERS", "8"))  # 4->8, use more CPU cores
PIN_MEMORY = _env_flag("T2C_PIN_MEMORY", True)
GRAD_CHECKPOINT = _env_flag("T2C_GRAD_CHKPT", False)  # few LoRA params; keep disabled
TORCH_COMPILE = _env_flag("T2C_TORCH_COMPILE", True)   # torch.compile acceleration (PyTorch 2.x)
# NOTE(review): FUSED_ADAMW is read but never used in this file; the optimizer
# choice is actually controlled by OPTIMIZER above — confirm before relying on it.
FUSED_ADAMW = _env_flag("T2C_FUSED_ADAMW", True)  # fused optimizer (~15% speedup)

# Resume policy: auto|always|never
RESUME_POLICY = os.getenv("T2C_RESUME", "auto").lower()
# Whether to prepend a task-type marker to each instruction (see PREFIX_* below).
USE_TASK_PREFIX = os.getenv("T2C_USE_TASK_PREFIX", "false").strip().lower() in {"1", "true", "yes", "y", "on"}
PREFIX_SIMPLE = os.getenv("T2C_PREFIX_SIMPLE", "[TASK=SIMPLE] ")
PREFIX_COMPLEX = os.getenv("T2C_PREFIX_COMPLEX", "[TASK=COMPLEX] ")

# Data preprocessing parallelism (multi-core CPU speedup)
PREPROCESS_WORKERS = int(os.getenv("T2C_PREPROCESS_WORKERS", "8"))

# ===================== #
# Custom stop conditions
# Rule 1: N consecutive evals whose loss improvement is below a threshold -> stop
# Rule 2: eval loss rebounds (rises past a threshold vs. the previous eval) -> stop
# Rule 3: epoch cap reached -> stop
# Environment variables:
#   T2C_USE_CUSTOM_STOP (enabled by default)
#   T2C_SMALL_IMPROV_THRESHOLD (default 0.001)
#   T2C_SMALL_IMPROV_PATIENCE (default 3)
#   T2C_REBOUND_DELTA (default 0.01)
#   T2C_MAX_EPOCHS (default 5.0)
# ===================== #
USE_CUSTOM_STOP = _env_flag("T2C_USE_CUSTOM_STOP", True)
SMALL_IMPROV_THRESHOLD = float(os.getenv("T2C_SMALL_IMPROV_THRESHOLD", "0.001"))
SMALL_IMPROV_PATIENCE = int(os.getenv("T2C_SMALL_IMPROV_PATIENCE", "3"))
REBOUND_DELTA = float(os.getenv("T2C_REBOUND_DELTA", "0.01"))
MAX_EPOCHS_STOP = float(os.getenv("T2C_MAX_EPOCHS", "5.0"))


class CustomStopCallback(TrainerCallback):
    """Custom stopping callback implementing three rules:

    1. Stop after ``small_patience`` consecutive evaluations whose loss
       improvement is positive but below ``small_threshold``.
    2. Stop when the eval loss rebounds by more than ``rebound_delta``
       relative to the previous evaluation.
    3. Stop once the current epoch reaches ``max_epochs``.
    """

    def __init__(self, small_threshold=SMALL_IMPROV_THRESHOLD, small_patience=SMALL_IMPROV_PATIENCE,
                 rebound_delta=REBOUND_DELTA, max_epochs=MAX_EPOCHS_STOP):
        self.small_threshold = float(small_threshold)
        self.small_patience = int(small_patience)
        self.rebound_delta = float(rebound_delta)
        self.max_epochs = float(max_epochs)

        # Rolling state across evaluations.
        self.prev_eval_loss = None  # loss seen at the previous evaluation
        self.best_eval_loss = None  # best (lowest) loss observed so far
        self.small_counter = 0      # consecutive too-small improvements

    @staticmethod
    def _extract_eval_loss(metrics):
        """Return the eval loss from *metrics*, trying the known key variants.

        Uses an explicit ``is not None`` check: the previous ``or``-chain
        (`metrics.get("eval_loss") or ...`) silently skipped a legitimate
        loss value of exactly 0.0 because it is falsy.
        """
        for key in ("eval_loss", "eval/loss", "loss"):
            value = metrics.get(key)
            if value is not None:
                return value
        return None

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        """Apply the three stop rules after each evaluation pass."""
        if metrics is None:
            return
        eval_loss = self._extract_eval_loss(metrics)
        if eval_loss is None:
            return

        # First evaluation: just record the baseline, no rule can fire yet.
        if self.prev_eval_loss is None:
            self.prev_eval_loss = eval_loss
            self.best_eval_loss = eval_loss
            return

        # Rule 3: epoch cap.
        cur_epoch = getattr(state, "epoch", None)
        if cur_epoch is not None and cur_epoch >= self.max_epochs:
            print(f"[STOP] 当前 epoch={cur_epoch} >= 上限 ({self.max_epochs})，停止训练")
            control.should_training_stop = True
            return

        improvement = self.prev_eval_loss - eval_loss

        # Rule 2: loss rebounded beyond tolerance.
        if eval_loss - self.prev_eval_loss > self.rebound_delta:
            print(f"[STOP] 损失反弹 (prev={self.prev_eval_loss:.6f} -> cur={eval_loss:.6f})，超过阈值 {self.rebound_delta}")
            control.should_training_stop = True
            return

        # Rule 1: count consecutive positive-but-tiny improvements.
        if improvement > 0:
            if improvement < self.small_threshold:
                self.small_counter += 1
                print(f"[INFO] 损失减小但小于阈值 ({improvement:.6f} < {self.small_threshold})，累计 {self.small_counter}/{self.small_patience}")
            else:
                self.small_counter = 0  # real progress: reset the streak

            if eval_loss < self.best_eval_loss:
                self.best_eval_loss = eval_loss
        else:
            # No improvement, but within rebound tolerance: reset the streak.
            self.small_counter = 0

        if self.small_counter >= self.small_patience:
            print(f"[STOP] 连续 {self.small_counter} 次改善幅度过小 (< {self.small_threshold})，停止训练")
            control.should_training_stop = True
            return

        self.prev_eval_loss = eval_loss

# ===================== #
# 加载数据集
# ===================== #
def read_jsonl(path):
    """Read a JSON-lines file, skipping lines that are not valid JSON.

    Args:
        path: Path to a UTF-8 encoded ``.jsonl`` file.

    Returns:
        List of decoded objects, one per successfully parsed line.
    """
    data = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            # Narrow exception: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, making the load uninterruptible.
            try:
                data.append(json.loads(line))
            except json.JSONDecodeError:
                # Tolerate corrupt/partial lines instead of aborting the load.
                continue
    return data

def _select_dataset_paths(kind: str):
    """Map a dataset kind to its (train, valid) JSONL paths.

    'complex' and 'simple' select the prefixed splits; any other value
    (including None/empty) falls back to the unprefixed default split.
    """
    normalized = (kind or "").lower()
    prefix = {"complex": "complex_", "simple": "simple_"}.get(normalized, "")
    train_file = os.path.join(DATA_DIR, f"{prefix}train_text2code.jsonl")
    valid_file = os.path.join(DATA_DIR, f"{prefix}valid_text2code.jsonl")
    return train_file, valid_file


# Training mode, env-var driven only: 'complex' | 'simple' | 'both'
TRAIN_KIND = os.getenv("T2C_TRAIN_KIND", "both").lower()  # 'complex'|'simple'|'both'

# Number of training epochs
EPOCHS = int(os.getenv("T2C_EPOCHS", "4"))

# ===================== #
# 数据集加载
# ===================== #

def _load_data_for_paths(train_path, valid_path):
    """从指定路径加载训练和验证数据"""
    train_data = []
    valid_data = []
    if os.path.exists(train_path):
        train_data = read_jsonl(train_path)
    else:
        print(f"[WARN] 训练文件不存在: {train_path}")
    if os.path.exists(valid_path):
        valid_data = read_jsonl(valid_path)
    else:
        print(f"[WARN] 验证文件不存在: {valid_path}")
    return train_data, valid_data


# Load the initial dataset for reporting (for 'both', the complex split is loaded first).
train_path, valid_path = _select_dataset_paths(TRAIN_KIND if TRAIN_KIND != "both" else "complex")

train_data, valid_data = _load_data_for_paths(train_path, valid_path)

# Merge in the MBPP dataset when both of its splits are available.
mbpp_train = os.path.join(MBPP_DIR, "train_text2code.jsonl")
mbpp_valid = os.path.join(MBPP_DIR, "valid_text2code.jsonl")
added_mbpp = False
if os.path.exists(mbpp_train) and os.path.exists(mbpp_valid):
    mbpp_tr = read_jsonl(mbpp_train)
    mbpp_va = read_jsonl(mbpp_valid)
    before_tr, before_va = len(train_data), len(valid_data)

    # Lightweight dedup keyed on a hash of each record's 'output' field.
    # NOTE(review): str hash() is salted per process (PYTHONHASHSEED), so the
    # dedup outcome is deterministic within a run but not across runs.
    def _dedup_concat(base, extra):
        seen = {hash(json.dumps(x.get("output", ""), ensure_ascii=False)) for x in base}
        for x in extra:
            h = hash(json.dumps(x.get("output", ""), ensure_ascii=False))
            if h not in seen:
                base.append(x)
                seen.add(h)
        return base

    train_data = _dedup_concat(train_data, mbpp_tr)
    valid_data = _dedup_concat(valid_data, mbpp_va)
    added_mbpp = True
    print(f"[INFO] 已合并 MBPP: 训练集 {before_tr}->{len(train_data)}, 验证集 {before_va}->{len(valid_data)}")

    # Downsample the original (pre-MBPP) portion before oversampling MBPP.
    # Controlled via COMPLEX_DOWNSAMPLE; 0 disables downsampling.
    try:
        _downsample_size = int(os.getenv("COMPLEX_DOWNSAMPLE", "0"))
    except Exception:
        _downsample_size = 0
    if _downsample_size > 0 and before_tr > _downsample_size:
        import random
        random.seed(42)  # fixed seed for reproducible sampling
        # The first before_tr entries are the original samples; the MBPP
        # records were appended after them by _dedup_concat.
        complex_part = train_data[:before_tr]
        mbpp_part = train_data[before_tr:]
        complex_sampled = random.sample(complex_part, _downsample_size)
        train_data = complex_sampled + mbpp_part
        print(f"[INFO] CodeSearchNet 已下采样: {before_tr} -> {_downsample_size}, 最终训练集 {len(train_data)}")

    # Oversample MBPP in the training set only; factor via MBPP_OVERSAMPLE (default 2x).
    try:
        _os_factor = int(os.getenv("MBPP_OVERSAMPLE", "2"))
    except Exception:
        _os_factor = 2
    if _os_factor > 1 and len(mbpp_tr) > 0:
        import random
        random.seed(42)
        extra = []
        # Append (factor - 1) * len(mbpp_tr) randomly chosen MBPP samples.
        target_extra = len(mbpp_tr) * (_os_factor - 1)
        for _ in range(target_extra):
            extra.append(random.choice(mbpp_tr))
        before_os = len(train_data)
        train_data.extend(extra)
        print(f"[INFO] MBPP 已过采样: 倍数={_os_factor}, 训练集 {before_os}->{len(train_data)}")

print(f"[SUCCESS] Loaded training samples: {len(train_data)}, validation samples: {len(valid_data)}" + (" (+MBPP)" if added_mbpp else ""))

# Mixed-precision selection: prefer bf16 on compute capability >= 8 (Ampere+),
# otherwise fall back to fp16 when enabled. CPU-only runs keep full precision.
use_bf16 = False
use_fp16 = False
if torch.cuda.is_available():
    try:
        major_cc = torch.cuda.get_device_capability(0)[0]
    except Exception:
        major_cc = 0  # capability query failed: treat as pre-Ampere
    if MP_BF16 or (MP_AUTO and major_cc >= 8):
        use_bf16 = True
    else:
        use_fp16 = MP_FP16

print(f"[CONFIG] 混合精度: bf16={use_bf16}, fp16={use_fp16}")
print(f"[CONFIG] 性能优化: batch_size={TRAIN_BSZ}, grad_accum={GRAD_ACCUM}, workers={NUM_WORKERS}")
print(f"[CONFIG] 数据预处理: {PREPROCESS_WORKERS} 进程并行")
print(f"[CONFIG] 优化器: {OPTIMIZER}, torch.compile={'enabled' if TORCH_COMPILE else 'disabled'}")

# ===================== #
# Dataset wrapping
# ===================== #
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, local_files_only=_LOCAL_ONLY)

def to_hf_dataset(data):
    """Reshape row-oriented records into the column dict Dataset.from_dict expects."""
    columns = {"instruction": [], "output": []}
    for record in data:
        columns["instruction"].append(record["instruction"])
        columns["output"].append(record["output"])
    return columns


def preprocess_function(examples, max_source_len=MAX_SOURCE_LEN, max_target_len=MAX_TARGET_LEN):
    """Tokenize a batch of instruction/output pairs into model inputs.

    Padding is deferred to the data collator; only truncation is applied
    here. The tokenized outputs become the ``labels`` field.
    """
    encoded = tokenizer(
        examples["instruction"],
        padding=False,
        truncation=True,
        max_length=max_source_len,
    )
    target_encoding = tokenizer(
        examples["output"],
        padding=False,
        truncation=True,
        max_length=max_target_len,
    )
    encoded["labels"] = target_encoding["input_ids"]
    return encoded


from datasets import Dataset
train_dataset = Dataset.from_dict(to_hf_dataset(train_data))
valid_dataset = Dataset.from_dict(to_hf_dataset(valid_data))

# Drop samples whose target would be hard-truncated, so the model never
# trains on incomplete code snippets.
def _len_ok(e):
    # Token count of the raw target, without special tokens or truncation.
    ids = tokenizer(e["output"], add_special_tokens=False, padding=False, truncation=False).input_ids
    return len(ids) <= MAX_TARGET_LEN

print("[INFO] 过滤过长样本（这可能需要几分钟，请耐心等待）...")
train_dataset = train_dataset.filter(_len_ok, desc=None)  # desc=None: no progress bar
valid_dataset = valid_dataset.filter(_len_ok, desc=None)  # desc=None: no progress bar
print(f"[SUCCESS] 过滤完成：训练集 {len(train_dataset)} 条，验证集 {len(valid_dataset)} 条")

def preprocess_batch(batch):
    # Thin module-level wrapper around preprocess_function, used as the map callback.
    return preprocess_function(batch)

# Multi-process tokenization (uses the many available vCPUs).
train_dataset = train_dataset.map(
    preprocess_batch, 
    batched=True, 
    remove_columns=["instruction", "output"],
    num_proc=PREPROCESS_WORKERS,  # parallel worker processes
    desc="Preprocessing train data"
)
valid_dataset = valid_dataset.map(
    preprocess_batch, 
    batched=True, 
    remove_columns=["instruction", "output"],
    num_proc=PREPROCESS_WORKERS,
    desc="Preprocessing valid data"
)

# ===================== #
# Model + LoRA setup
# ===================== #
print(f"[INFO] 加载基础模型: {MODEL_NAME} (local_only={_LOCAL_ONLY}) ...")
load_kwargs = {"local_files_only": _LOCAL_ONLY}
# Load weights directly in the selected mixed-precision dtype.
if use_bf16:
    load_kwargs["torch_dtype"] = torch.bfloat16
elif use_fp16:
    load_kwargs["torch_dtype"] = torch.float16
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, **load_kwargs)

# LoRA adapter configuration (rank/alpha/targets come from the env vars above).
config = LoraConfig(
    r=LORA_R,
    lora_alpha=LORA_ALPHA,
    target_modules=LORA_TARGETS,
    lora_dropout=LORA_DROPOUT,
    bias="none",
    task_type="SEQ_2_SEQ_LM"
)

model = get_peft_model(model, config)

# LoRA has few trainable params (~2M per the author's note), so gradient
# checkpointing stays off by default; enable with T2C_GRAD_CHKPT=1 when
# memory-constrained (it trades speed for memory).
if GRAD_CHECKPOINT:
    try:
        model.gradient_checkpointing_enable()
        print("[CONFIG] 已启用 gradient checkpointing（会降低训练速度，仅在显存不足时使用）")
    except Exception as e:
        print(f"[WARN] 启用 gradient checkpointing 失败: {e}")

if TORCH_COMPILE:
    try:
        # reduce-overhead mode; compilation failures degrade gracefully to eager.
        model = torch.compile(model, mode="reduce-overhead", fullgraph=False)
        print("[CONFIG] 已启用 torch.compile (reduce-overhead mode)")
    except Exception as e:
        print(f"[WARN] torch.compile 失败: {e}")

# Report trainable vs. total parameter counts.
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
total_params = sum(p.numel() for p in model.parameters())
print(f"[CONFIG] 可训练参数: {trainable_params} / 总参数: {total_params} ({100 * trainable_params / total_params:.2f}%)")

# ===================== #
# 训练配置
# ===================== #

# DeepSpeed configuration (optional, off by default)
USE_DEEPSPEED = _env_flag("T2C_USE_DEEPSPEED", False)
DEEPSPEED_CONFIG_PATH = os.getenv("T2C_DEEPSPEED_CONFIG", "ds_config_zero2.json")

if USE_DEEPSPEED:
    print(f"[DEEPSPEED] 启用 DeepSpeed ZeRO-2 优化")
    print(f"[DEEPSPEED] 配置文件: {DEEPSPEED_CONFIG_PATH}")
    print(f"[DEEPSPEED] 优化器卸载到 CPU，预计节省显存 ~30%")

# Keyword arguments shared by every TrainingArguments instance; per-dataset
# runs override output_dir/logging_dir via _make_training_args below.
_ta_common_args = dict(
    output_dir=OUTPUT_DIR,
    save_strategy="steps",
    save_steps=SAVE_STEPS,
    eval_steps=EVAL_STEPS,
    logging_steps=LOG_STEPS,
    learning_rate=LEARNING_RATE,
    per_device_train_batch_size=TRAIN_BSZ,
    per_device_eval_batch_size=EVAL_BSZ,
    num_train_epochs=EPOCHS,
    gradient_accumulation_steps=GRAD_ACCUM,
    warmup_ratio=float(os.getenv("T2C_WARMUP_RATIO", "0.1")),
    weight_decay=float(os.getenv("T2C_WEIGHT_DECAY", "0.01")),
    max_grad_norm=MAX_GRAD_NORM,
    optim=OPTIMIZER,
    lr_scheduler_type=LR_SCHEDULER,
    fp16=use_fp16,
    bf16=use_bf16 if "bf16" in TrainingArguments.__init__.__code__.co_varnames else False,  # older transformers lack bf16
    dataloader_pin_memory=PIN_MEMORY,
    dataloader_num_workers=NUM_WORKERS,
    dataloader_prefetch_factor=int(os.getenv("T2C_PREFETCH", "4")),  # prefetch batches into RAM
    dataloader_persistent_workers=True,  # keep dataloader workers alive, no restart overhead
    deepspeed=DEEPSPEED_CONFIG_PATH if USE_DEEPSPEED else None,  # DeepSpeed config file
    logging_dir=f"{OUTPUT_DIR}/logs",
    save_total_limit=SAVE_TOTAL_LIMIT,
    report_to=os.getenv("T2C_REPORT_TO", "none"),
    overwrite_output_dir=_env_flag("T2C_OVERWRITE_OUT", True),
    ddp_find_unused_parameters=False,  # no-op on single GPU, speeds up multi-GPU DDP
    torch_compile=TORCH_COMPILE,  # also enable compile via TrainingArguments
    torch_compile_backend="inductor" if TORCH_COMPILE else None,  # inductor backend
    torch_compile_mode="reduce-overhead" if TORCH_COMPILE else None,  # compile mode
)

# The eval-strategy kwarg was renamed across transformers releases; use
# whichever name this installation's TrainingArguments accepts.
_sig = inspect.signature(TrainingArguments.__init__)
if "evaluation_strategy" in _sig.parameters:
    _ta_common_args["evaluation_strategy"] = "steps"
elif "eval_strategy" in _sig.parameters:
    _ta_common_args["eval_strategy"] = "steps"
elif "evaluate_during_training" in _sig.parameters:
    _ta_common_args["evaluate_during_training"] = True

# When supported, reload the best checkpoint at the end so CustomStopCallback's
# early stop still yields the lowest-eval-loss model.
if "load_best_model_at_end" in _sig.parameters:
    _ta_common_args["load_best_model_at_end"] = USE_CUSTOM_STOP
if "metric_for_best_model" in _sig.parameters:
    _ta_common_args["metric_for_best_model"] = "eval_loss"
if "greater_is_better" in _sig.parameters:
    _ta_common_args["greater_is_better"] = False

def _make_training_args(out_dir: str) -> TrainingArguments:
    """Build TrainingArguments bound to a dedicated output/log directory."""
    overrides = {**_ta_common_args, "output_dir": out_dir, "logging_dir": f"{out_dir}/logs"}
    ta = TrainingArguments(**overrides)
    # Older transformers releases lack this constructor kwarg, so set it post-hoc.
    if "gradient_checkpointing" in inspect.signature(TrainingArguments.__init__).parameters:
        try:
            setattr(ta, "gradient_checkpointing", GRAD_CHECKPOINT)
        except Exception:
            pass
    return ta

# Default shared TrainingArguments (actual runs build per-dataset instances).
training_args = TrainingArguments(**_ta_common_args)

# Mirror the gradient-checkpointing flag on the default instance as well.
if "gradient_checkpointing" in inspect.signature(TrainingArguments.__init__).parameters:
    try:
        setattr(training_args, "gradient_checkpointing", GRAD_CHECKPOINT)
    except Exception:
        pass

# Dynamic per-batch padding of inputs and labels for seq2seq training.
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)

def _build_trainer(model, training_args, train_dataset, valid_dataset, tokenizer, data_collator):
    """Assemble a Trainer, attaching the custom stop callback when enabled."""
    callback_list = [CustomStopCallback()] if USE_CUSTOM_STOP else []
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=valid_dataset,
        tokenizer=tokenizer,
        data_collator=data_collator,
        callbacks=callback_list or None,
    )
    return trainer


def _is_checkpoint_compatible(ckpt_dir: str) -> bool:
    """检查检查点是否与当前 LoRA 配置兼容"""
    try:
        cfg_path = os.path.join(ckpt_dir, "adapter_config.json")
        with open(cfg_path, "r", encoding="utf-8") as f:
            cfg = json.load(f)
        cr = int(cfg.get("r")) if cfg.get("r") is not None else None
        tm = cfg.get("target_modules")
        tm = [str(x) for x in (tm or [])]
        return (cr == LORA_R) and (sorted(tm) == sorted([str(x) for x in LORA_TARGETS]))
    except Exception:
        return False

def _train_one(kind: str, out_dir: str, train_dataset, valid_dataset):
    """Fine-tune a fresh LoRA adapter on one dataset ('complex' or 'simple').

    Reloads the base model each call so adapters trained on different
    datasets never share weights, trains (optionally resuming from a
    compatible checkpoint), then saves the adapter, tokenizer, and
    convergence artifacts to out_dir.
    """
    print(f"[TRAINING] 开始在 {kind} 数据集上微调 LoRA，输出目录: {out_dir}")
    # Reload the base model to avoid cross-contamination between runs.
    load_kwargs_local = {"local_files_only": _LOCAL_ONLY}
    if use_bf16:
        load_kwargs_local["torch_dtype"] = torch.bfloat16
    elif use_fp16:
        load_kwargs_local["torch_dtype"] = torch.float16
    base_model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, **load_kwargs_local)
    lora_config = LoraConfig(
        r=LORA_R,
        lora_alpha=LORA_ALPHA,
        target_modules=LORA_TARGETS,
        lora_dropout=LORA_DROPOUT,
        bias="none",
        task_type="SEQ_2_SEQ_LM",
    )
    model_local = get_peft_model(base_model, lora_config)
    if GRAD_CHECKPOINT:
        try:
            model_local.gradient_checkpointing_enable()
        except Exception:
            pass

    # Dataset-specific training args (own checkpoint and log directories).
    ta_local = _make_training_args(out_dir)
    data_collator_local = DataCollatorForSeq2Seq(tokenizer, model=model_local)
    trainer_local = _build_trainer(model_local, ta_local, train_dataset, valid_dataset, tokenizer, data_collator_local)

    # Look for a resumable checkpoint in the output directory.
    last_checkpoint = None
    if os.path.isdir(out_dir):
        last_checkpoint = get_last_checkpoint(out_dir)

    resume_path = None
    if last_checkpoint:
        if RESUME_POLICY == "never":
            resume_path = None
        elif RESUME_POLICY == "always":
            resume_path = last_checkpoint
        else:  # auto: resume only when the checkpoint matches the current LoRA config
            if _is_checkpoint_compatible(last_checkpoint):
                resume_path = last_checkpoint
            else:
                resume_path = None

    # Run training (optionally resuming).
    if resume_path:
        trainer_local.train(resume_from_checkpoint=resume_path)
    else:
        trainer_local.train()

    # Save the final model (the best one when load_best_model_at_end=True).
    try:
        trainer_local.save_model(out_dir)
    except Exception:
        # Fallback: persist just the PEFT adapter weights.
        model_local.save_pretrained(out_dir)
    tokenizer.save_pretrained(out_dir)
    print(f"[SUCCESS] 已保存 LoRA 到 {out_dir}")

    # Persist the training convergence CSV/plot.
    _save_training_convergence(trainer_local, out_dir)


def _save_training_convergence(trainer: Trainer, out_dir: str):
    """Export the trainer's loss history as a CSV and, when matplotlib is
    available, a PNG convergence curve under *out_dir*."""
    os.makedirs(out_dir, exist_ok=True)
    history = list(getattr(trainer.state, "log_history", []))
    if not history:
        print("[WARN] 未获取到训练日志历史，跳过收敛曲线绘制")
        return

    # Collect (step, loss) pairs for the train and eval series separately.
    train_series = [(rec["step"], rec["loss"]) for rec in history
                    if rec.get("step") is not None and "loss" in rec]
    eval_series = [(rec["step"], rec["eval_loss"]) for rec in history
                   if rec.get("step") is not None and "eval_loss" in rec]

    # CSV dump for external analysis.
    csv_path = os.path.join(out_dir, "training_log.csv")
    try:
        import csv
        with open(csv_path, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(["type", "step", "loss"])
            writer.writerows(["train", step, loss] for step, loss in train_series)
            writer.writerows(["eval", step, loss] for step, loss in eval_series)
        print(f"[SAVED] 训练日志CSV: {csv_path}")
    except Exception as e:
        print(f"[WARN] 保存训练日志CSV失败: {e}")

    # Optional convergence plot; skipped gracefully without matplotlib.
    try:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(8, 5), dpi=120)
        if train_series:
            steps, losses = zip(*train_series)
            plt.plot(steps, losses, label="train loss", color="#1f77b4", linewidth=1.5)
        if eval_series:
            steps, losses = zip(*eval_series)
            plt.plot(steps, losses, label="eval loss", color="#ff7f0e", linestyle="--", linewidth=1.5)
        plt.xlabel("step")
        plt.ylabel("loss")
        plt.title("Training Convergence")
        plt.grid(True, alpha=0.3)
        plt.legend()
        png_path = os.path.join(out_dir, "training_convergence.png")
        plt.tight_layout()
        plt.savefig(png_path)
        plt.close()
        print(f"[SAVED] 收敛曲线图: {png_path}")
    except ImportError:
        print("[WARN] 未安装 matplotlib，已跳过曲线绘制。可执行 'pip install matplotlib' 后重训或根据 CSV 自行绘制。")
    except Exception as e:
        print(f"[WARN] 绘制收敛曲线失败: {e}")

# ===================== #
# Main training flow: complex / simple / both
# ===================== #

if TRAIN_KIND in {"complex", "both"}:
    # --- Complex dataset ---
    c_train_path, c_valid_path = _select_dataset_paths('complex')
    c_train_data, c_valid_data = _load_data_for_paths(c_train_path, c_valid_path)
    
    # Optionally prepend the task-type marker to every instruction.
    if USE_TASK_PREFIX and c_train_data:
        for x in c_train_data:
            x["instruction"] = f"{PREFIX_COMPLEX}{x['instruction']}"
    if USE_TASK_PREFIX and c_valid_data:
        for x in c_valid_data:
            x["instruction"] = f"{PREFIX_COMPLEX}{x['instruction']}"
    
    from datasets import Dataset as _Dataset
    c_train_ds = _Dataset.from_dict(to_hf_dataset(c_train_data)) if c_train_data else None
    c_valid_ds = _Dataset.from_dict(to_hf_dataset(c_valid_data)) if c_valid_data else None
    
    # Filter over-length targets, then tokenize (both multi-process).
    if c_train_ds is not None:
        c_train_ds = c_train_ds.filter(_len_ok, num_proc=PREPROCESS_WORKERS).map(
            preprocess_batch, batched=True, remove_columns=["instruction", "output"], 
            num_proc=PREPROCESS_WORKERS, desc="预处理 Complex 训练集"
        )
    if c_valid_ds is not None:
        c_valid_ds = c_valid_ds.filter(_len_ok, num_proc=PREPROCESS_WORKERS).map(
            preprocess_batch, batched=True, remove_columns=["instruction", "output"],
            num_proc=PREPROCESS_WORKERS, desc="预处理 Complex 验证集"
        )
    
    # Train only when there is training data (validation may be None).
    if c_train_ds is not None:
        _train_one('complex', OUTPUT_DIR_COMPLEX, c_train_ds, c_valid_ds)

if TRAIN_KIND in {"simple", "both"}:
    # --- Simple dataset ---
    s_train_path, s_valid_path = _select_dataset_paths('simple')
    s_train_data, s_valid_data = _load_data_for_paths(s_train_path, s_valid_path)
    
    # Optionally prepend the task-type marker to every instruction.
    if USE_TASK_PREFIX and s_train_data:
        for x in s_train_data:
            x["instruction"] = f"{PREFIX_SIMPLE}{x['instruction']}"
    if USE_TASK_PREFIX and s_valid_data:
        for x in s_valid_data:
            x["instruction"] = f"{PREFIX_SIMPLE}{x['instruction']}"
    
    from datasets import Dataset as _Dataset
    s_train_ds = _Dataset.from_dict(to_hf_dataset(s_train_data)) if s_train_data else None
    s_valid_ds = _Dataset.from_dict(to_hf_dataset(s_valid_data)) if s_valid_data else None
    
    # Filter over-length targets, then tokenize (both multi-process).
    if s_train_ds is not None:
        s_train_ds = s_train_ds.filter(_len_ok, num_proc=PREPROCESS_WORKERS).map(
            preprocess_batch, batched=True, remove_columns=["instruction", "output"],
            num_proc=PREPROCESS_WORKERS, desc="预处理 Simple 训练集"
        )
    if s_valid_ds is not None:
        s_valid_ds = s_valid_ds.filter(_len_ok, num_proc=PREPROCESS_WORKERS).map(
            preprocess_batch, batched=True, remove_columns=["instruction", "output"],
            num_proc=PREPROCESS_WORKERS, desc="预处理 Simple 验证集"
        )
    
    # Train only when there is training data (validation may be None).
    if s_train_ds is not None:
        _train_one('simple', OUTPUT_DIR_SIMPLE, s_train_ds, s_valid_ds)

print("[INFO] 训练控制流程结束。请检查对应输出目录。")

