#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
多任务学习训练脚本 v2 (优化版)
支持 4 个任务: 代码生成、代码摘要、代码补全、语法检查

使用方法:
    python multitask_train.py --task_weights 1.0,0.5,0.4,0.3 --epochs 4
"""

import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    Trainer,
    TrainingArguments,
    DataCollatorForSeq2Seq,
)
from transformers.trainer_utils import get_last_checkpoint
from peft import LoraConfig, get_peft_model
from datasets import Dataset, concatenate_datasets
import argparse
import random
from tqdm import tqdm

# Prefer the HF mirror endpoint unless the environment already provides one.
os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")

# ==================== Local model path configuration ====================
# Repository root: two directory levels above this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Expected on-disk location of a pre-downloaded CodeT5-base checkpoint.
LOCAL_BASE_MODEL = os.path.join(BASE_DIR, "models", "Salesforce_codet5-base")

def get_model_path():
    """Return the model path to load, preferring a local checkpoint.

    Falls back to the Hugging Face Hub id "Salesforce/codet5-base" when no
    usable local copy is found.
    """
    config_file = os.path.join(LOCAL_BASE_MODEL, "config.json")
    # A usable local checkpoint needs both the directory and its config.json.
    if not (os.path.exists(LOCAL_BASE_MODEL) and os.path.exists(config_file)):
        print(f"[INFO] Local model not found, will use: Salesforce/codet5-base")
        return "Salesforce/codet5-base"
    print(f"[INFO] Using local model: {LOCAL_BASE_MODEL}")
    return LOCAL_BASE_MODEL

# Checkpoint-resume policy, read from env var T2C_RESUME: auto|always|never.
RESUME_POLICY = os.getenv("T2C_RESUME", "auto").lower()

# Performance-tuning knobs.
BATCH_SIZE = 16       # tuned for an RTX 5090
GRAD_ACCUM = 1        # no gradient accumulation needed
NUM_WORKERS = 8       # parallel data-loading workers
PREFETCH_FACTOR = 4   # batches to prefetch per worker

# ==================== 数据准备 (优化版) ====================

def read_jsonl(path):
    """Load a JSONL file into a list of records.

    Malformed lines are skipped with a printed warning rather than
    aborting the whole load.
    """
    records = []
    with open(path, 'r', encoding='utf-8') as handle:
        for line_num, raw in enumerate(handle, 1):
            try:
                parsed = json.loads(raw)
            except json.JSONDecodeError as e:
                print(f"Warning: Skipping line {line_num} due to JSON error: {e}")
            else:
                records.append(parsed)
    return records


def prepare_all_tasks_optimized(data_path, sample_sizes=None):
    """Build the combined multi-task training set from one JSONL file.

    Core optimization: the source file is read exactly ONCE and the loaded
    samples are shared across all four tasks (the previous version re-read
    the same file four times, wasting I/O).

    Args:
        data_path: path to a JSONL file whose records have "instruction"
            and "output" fields.
        sample_sizes: optional dict with any of the keys "generation",
            "summarization", "completion", "syntax_check". Missing keys
            fall back to the defaults below (previously a partial dict
            raised KeyError); a value of None means "use every sample"
            for any task (previously None crashed for every task except
            "generation", and 0 for "generation" was wrongly treated as
            "all").

    Returns:
        list[dict]: samples with instruction/output/task_id/task_name keys.

    Raises:
        ValueError: if the file contains no usable samples.
    """
    # Defaults, with caller-supplied overrides merged on top.
    sizes = {
        "generation": None,      # None -> use all samples
        "summarization": 5000,
        "completion": 3000,
        "syntax_check": 3000
    }
    if sample_sizes:
        sizes.update(sample_sizes)

    print("=" * 50)
    print("准备多任务数据 (优化版)...")
    print("=" * 50)

    # Read the file only once (the core optimization).
    print(f"从 {data_path} 读取数据...")
    all_data = read_jsonl(data_path)
    print(f"已加载 {len(all_data)} 个样本")

    if len(all_data) == 0:
        raise ValueError(f"No data found in {data_path}")

    # Deterministic shuffle so per-task sampling is reproducible.
    random.seed(42)
    random.shuffle(all_data)

    def _count(task):
        # Normalize a per-task sample budget: None means "all samples",
        # any integer is capped at the dataset size.
        n = sizes.get(task)
        return len(all_data) if n is None else min(n, len(all_data))

    tasks_data = []

    # Task 0: code generation (instruction -> code).
    print("\n[任务 0] 代码生成...")
    for sample in all_data[:_count("generation")]:
        tasks_data.append({
            "instruction": sample["instruction"],
            "output": sample["output"],
            "task_id": 0,
            "task_name": "generation"
        })
    print(f"  样本数: {len(tasks_data)}")

    # Task 1: code summarization (inverse task: code -> instruction).
    print("[任务 1] 代码摘要...")
    summ_start = len(tasks_data)
    for sample in all_data[:_count("summarization")]:
        tasks_data.append({
            "instruction": f"Summarize the following code:\n{sample['output']}",
            "output": sample["instruction"],
            "task_id": 1,
            "task_name": "summarization"
        })
    print(f"  样本数: {len(tasks_data) - summ_start}")

    # Task 2: code completion (first half given, full code as target).
    print("[任务 2] 代码补全...")
    comp_start = len(tasks_data)
    for sample in all_data[:_count("completion")]:
        code_lines = sample["output"].split("\n")
        if len(code_lines) < 3:
            # Too short to split into a meaningful prefix/target pair.
            continue

        # Mask the second half of the code.
        mask_point = len(code_lines) // 2
        partial_code = "\n".join(code_lines[:mask_point])
        full_code = sample["output"]

        tasks_data.append({
            "instruction": f"Complete the following code:\n{partial_code}\n[MASK]",
            "output": full_code,
            "task_id": 2,
            "task_name": "completion"
        })
    print(f"  样本数: {len(tasks_data) - comp_start}")

    # Task 3: syntax checking (clean sample + optional corrupted twin).
    print("[任务 3] 语法检查...")
    syntax_start = len(tasks_data)
    for sample in all_data[:_count("syntax_check")]:
        code = sample["output"]

        # Always emit the clean version labelled "no errors".
        tasks_data.append({
            "instruction": f"Check if this code has syntax errors:\n{code}",
            "output": "No syntax errors detected.",
            "task_id": 3,
            "task_name": "syntax_check"
        })

        # ~50% of the time also emit a version with injected errors.
        if random.random() < 0.5:
            buggy_code = code.replace(":", "").replace("==", "=")
            if buggy_code != code:
                tasks_data.append({
                    "instruction": f"Check if this code has syntax errors:\n{buggy_code}",
                    "output": "Syntax error detected: missing colons or incorrect operators.",
                    "task_id": 3,
                    "task_name": "syntax_check"
                })
    print(f"  样本数: {len(tasks_data) - syntax_start}")

    print(f"\n[总计] {len(tasks_data)} 个训练样本")
    print("=" * 50)

    return tasks_data


# ==================== 多任务模型 (已修复) ====================

class MultiTaskT5Trainer(Trainer):
    """Trainer subclass that scales the seq2seq loss by per-task weights."""

    def __init__(self, task_weights, *args, **kwargs):
        """Store the per-task weight vector.

        Args:
            task_weights: sequence of 4 floats, indexed by task_id.
        """
        super().__init__(*args, **kwargs)
        # Keep the weights on CPU; they are moved to the batch's device
        # lazily in compute_loss. The old code pinned them to the default
        # CUDA device here, which breaks when the batch (and thus task_ids)
        # lives on a different device, e.g. under multi-GPU placement.
        self.task_weights = torch.tensor(task_weights, dtype=torch.float32)

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        """Weighted loss function (major bug fixed).

        Original bug: the loss was divided by batch_size twice, producing
        tiny loss values. Fix: only apply task_id-based weighting and let
        the Trainer handle batch averaging.

        Args:
            model: model being trained.
            inputs: batch dict; must contain a "task_ids" tensor.
            return_outputs: whether to also return the model outputs.
            num_items_in_batch: batch item count (new in Transformers 4.x).
        """
        task_ids = inputs.pop("task_ids")  # [batch_size]

        # Forward pass.
        outputs = model(**inputs)
        base_loss = outputs.loss  # already averaged over the batch

        # Index the weights on the same device as the batch's task ids.
        weights = self.task_weights.to(task_ids.device)[task_ids]  # [batch_size]
        # Scale the batch-mean loss by the mean sample weight. NOTE(review):
        # this is an approximation of true per-sample weighting, since
        # base_loss is already averaged before weights are applied.
        weighted_loss = base_loss * weights.mean()

        return (weighted_loss, outputs) if return_outputs else weighted_loss


# ==================== 训练收敛可视化 ====================
def _save_training_convergence(trainer, out_dir):
    """保存训练日志和收敛曲线"""
    os.makedirs(out_dir, exist_ok=True)
    history = list(getattr(trainer.state, "log_history", []))
    if not history:
        print("[WARN] 未获取到训练日志历史，跳过收敛曲线绘制")
        return

    # 提取训练和验证损失
    train_steps, train_losses = [], []
    eval_steps, eval_losses = [], []
    for rec in history:
        step = rec.get("step")
        if step is None:
            continue
        if "loss" in rec:
            train_steps.append(step)
            train_losses.append(rec["loss"])
        if "eval_loss" in rec:
            eval_steps.append(step)
            eval_losses.append(rec["eval_loss"])

    # 保存 CSV 日志
    csv_path = os.path.join(out_dir, "training_log.csv")
    try:
        import csv
        with open(csv_path, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(["type", "step", "loss"])
            for s, l in zip(train_steps, train_losses):
                writer.writerow(["train", s, l])
            for s, l in zip(eval_steps, eval_losses):
                writer.writerow(["eval", s, l])
        print(f"[SAVED] 训练日志CSV: {csv_path}")
    except Exception as e:
        print(f"[WARN] 保存训练日志CSV失败: {e}")

    # 绘制并保存收敛曲线
    try:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(8, 5), dpi=120)
        if train_steps:
            plt.plot(train_steps, train_losses, label="train loss", color="#1f77b4", linewidth=1.5)
        if eval_steps:
            plt.plot(eval_steps, eval_losses, label="eval loss", color="#ff7f0e", linestyle="--", linewidth=1.5)
        plt.xlabel("step")
        plt.ylabel("loss")
        plt.title("Multi-Task Training Convergence")
        plt.grid(True, alpha=0.3)
        plt.legend()
        png_path = os.path.join(out_dir, "training_convergence.png")
        plt.tight_layout()
        plt.savefig(png_path)
        plt.close()
        print(f"[SAVED] 收敛曲线图: {png_path}")
    except ImportError:
        print("[WARN] 未安装 matplotlib，已跳过曲线绘制。可执行 'pip install matplotlib' 后重训或根据 CSV 自行绘制。")
    except Exception as e:
        print(f"[WARN] 绘制收敛曲线失败: {e}")


# ==================== 检查点兼容性检查 ====================
def _is_checkpoint_compatible(ckpt_dir):
    """检查检查点是否与当前 LoRA 配置兼容"""
    try:
        cfg_path = os.path.join(ckpt_dir, "adapter_config.json")
        with open(cfg_path, "r", encoding="utf-8") as f:
            cfg = json.load(f)
        # 检查 LoRA rank 和目标模块是否匹配
        cr = int(cfg.get("r")) if cfg.get("r") is not None else None
        tm = cfg.get("target_modules")
        tm = [str(x) for x in (tm or [])]
        return (cr == 8) and (sorted(tm) == sorted(["q", "v"]))
    except Exception:
        return False


# ==================== 主训练流程 (优化版) ====================

def main(args):
    """Run the full multi-task LoRA fine-tuning pipeline.

    Loads CodeT5 (local checkpoint preferred), wraps it with LoRA adapters,
    builds the four-task dataset from a single JSONL file, trains with a
    task-weighted loss, and saves the adapter, tokenizer, logs and a run
    config under model/multitask_lora.

    Args:
        args: parsed argparse namespace with task_weights, epochs,
            batch_size and the per-task sample-count options.
    """
    print("=" * 50)
    print("   多任务学习训练 v2 (优化版)")
    print("=" * 50)
    print(f"Task weights: {args.task_weights}")
    print(f"Epochs: {args.epochs}")
    print(f"Batch Size: {args.batch_size}")
    print(f"Workers: {NUM_WORKERS}")
    print("")
    
    # Parse the comma-separated task weights (generation, summarization,
    # completion, syntax_check).
    task_weights = [float(x) for x in args.task_weights.split(",")]
    assert len(task_weights) == 4, "需要 4 个任务权重"
    
    # Load the tokenizer and base model.
    print("加载模型...")
    model_path = get_model_path()
    
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
    
    # Apply LoRA adapters.
    # NOTE(review): r=8 and target_modules=["q", "v"] must stay in sync with
    # the compatibility check used for checkpoint resume below.
    lora_config = LoraConfig(
        r=8,
        lora_alpha=32,
        target_modules=["q", "v"],
        lora_dropout=0.1,
        bias="none",
        task_type="SEQ_2_SEQ_LM"
    )
    model = get_peft_model(model, lora_config)
    
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    print(f"可训练参数: {trainable:,} / {total:,} ({100*trainable/total:.2f}%)")
    print("")
    
    # Build the multi-task dataset (optimized: single file read).
    train_data = prepare_all_tasks_optimized(
        "data/processed/complex_train_text2code.jsonl",
        sample_sizes={
            "generation": args.gen_samples,
            "summarization": args.summ_samples,
            "completion": args.comp_samples,
            "syntax_check": args.syntax_samples
        }
    )
    
    # Parallel tokenization.
    print("\n并行分词处理中...")
    def tokenize_function(examples):
        """Tokenize a batched dict of examples, attaching labels and task ids."""
        # Tokenize the instruction (encoder input).
        model_inputs = tokenizer(
            examples["instruction"],
            max_length=224,
            truncation=True,
            padding=False
        )
        
        # Tokenize the target output (decoder labels).
        labels = tokenizer(
            examples["output"],
            max_length=224,
            truncation=True,
            padding=False
        )
        
        # Attach the label ids.
        model_inputs["labels"] = labels["input_ids"]
        
        # Important: in batched mode task_id is already a list; assign directly.
        model_inputs["task_ids"] = examples["task_id"]
        
        return model_inputs
    
    # Convert to a Dataset.
    train_dataset = Dataset.from_dict({
        "instruction": [x["instruction"] for x in train_data],
        "output": [x["output"] for x in train_data],
        "task_id": [x["task_id"] for x in train_data]
    })
    
    # Multi-process parallel tokenization.
    train_dataset = train_dataset.map(
        tokenize_function,
        batched=True,
        remove_columns=["instruction", "output", "task_id"],  # drop task_id, keep task_ids
        num_proc=8  # 8 parallel processes
    )
    
    # Debug: verify the task_ids field survived the map.
    print(f"\n[DEBUG] Dataset columns after tokenization: {train_dataset.column_names}")
    print(f"[DEBUG] First sample keys: {list(train_dataset[0].keys())}")
    if len(train_dataset) > 0:
        sample = train_dataset[0]
        print(f"[DEBUG] Sample has task_ids: {'task_ids' in sample}")
        if 'task_ids' in sample:
            print(f"[DEBUG] task_ids value: {sample['task_ids']}")
    
    # Validation set (small subset of the training data).
    # NOTE(review): this overlaps the training data, so eval_loss tracks fit,
    # not generalization — confirm whether a held-out split is intended.
    valid_size = min(1000, len(train_dataset) // 10)
    valid_dataset = train_dataset.select(range(valid_size))
    
    print(f"训练集: {len(train_dataset)} 样本")
    print(f"验证集: {len(valid_dataset)} 样本")
    print("")
    
    # Training configuration (optimized).
    training_args = TrainingArguments(
        output_dir="model/multitask_lora",
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        gradient_accumulation_steps=GRAD_ACCUM,
        learning_rate=1e-4,  # lowered for multi-task stability
        warmup_ratio=0.1,
        logging_steps=100,
        save_steps=1000,
        eval_steps=1000,
        save_total_limit=2,
        fp16=torch.cuda.is_available(),
        eval_strategy="steps",
        load_best_model_at_end=True,
        report_to="none",
        max_grad_norm=1.0,  # gradient clipping to prevent explosion
        # Performance options (dataloader workers disabled because of the
        # custom collator).
        dataloader_num_workers=0,  # disabled: custom-collator serialization issues
        dataloader_prefetch_factor=None,
        dataloader_persistent_workers=False,
        dataloader_pin_memory=True if torch.cuda.is_available() else False,
        optim="adamw_torch_fused" if torch.cuda.is_available() else "adamw_torch",
        # torch.compile disabled: incompatible with LoRA (BackendCompilerFailed)
    )
    
    print("已启用性能优化:")
    print(f"  - Fused AdamW: {training_args.optim == 'adamw_torch_fused'}")
    print(f"  - torch.compile: 已禁用 (与 LoRA 不兼容)")
    print(f"  - DataLoader workers: 0 (因兼容自定义 collator 已禁用)")
    print(f"  - Prefetch factor: N/A")
    print(f"  - 并行分词: 8 进程")
    print("")
    
    # Data collator that preserves task_ids (fixed version).
    def custom_data_collator(features):
        """Collate a batch while carrying task_ids through the standard
        seq2seq collator, which may drop unknown fields."""
        # Key point: extract task_ids BEFORE handing the features to
        # DataCollatorForSeq2Seq, since the standard collator may filter
        # custom fields.
        task_ids = []
        for f in features:
            # Extract and remove task_ids from each feature.
            task_id = f.get("task_ids", 0)  # default 0 when missing
            task_ids.append(task_id)
            
            # Delete from the feature dict to avoid collator conflicts.
            if "task_ids" in f:
                del f["task_ids"]
        
        # Convert to a tensor.
        task_ids_tensor = torch.tensor(task_ids, dtype=torch.long)
        
        # Let the standard collator pad/stack the remaining fields
        # (input_ids, attention_mask, labels).
        batch = DataCollatorForSeq2Seq(tokenizer, model=model)(features)
        
        # Re-attach task_ids to the batch.
        batch["task_ids"] = task_ids_tensor
        
        return batch
    
    # Create the trainer (loss computation fixed).
    trainer = MultiTaskT5Trainer(
        task_weights=task_weights,
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=valid_dataset,
        tokenizer=tokenizer,
        data_collator=custom_data_collator,
    )
    
    # Start training.
    print("开始多任务训练...")
    
    # Look for an existing checkpoint and decide whether to resume.
    output_dir = "model/multitask_lora"
    last_checkpoint = None
    if os.path.isdir(output_dir):
        last_checkpoint = get_last_checkpoint(output_dir)
    
    resume_path = None
    if last_checkpoint:
        if RESUME_POLICY == "never":
            print(f"[INFO] Found checkpoint {last_checkpoint}, but RESUME_POLICY=never, starting fresh")
            resume_path = None
        elif RESUME_POLICY == "always":
            print(f"[INFO] 从检查点恢复: {last_checkpoint}")
            resume_path = last_checkpoint
        else:  # auto: resume only from a LoRA-compatible checkpoint
            if _is_checkpoint_compatible(last_checkpoint):
                print(f"[INFO] 从兼容检查点恢复: {last_checkpoint}")
                resume_path = last_checkpoint
            else:
                print(f"[WARN] 检查点 {last_checkpoint} 与当前配置不兼容,从头开始")
                resume_path = None
    
    if resume_path:
        trainer.train(resume_from_checkpoint=resume_path)
    else:
        trainer.train()
    
    # Save the convergence curve and the training log.
    _save_training_convergence(trainer, "model/multitask_lora")
    
    # Save the LoRA adapter and tokenizer.
    model.save_pretrained("model/multitask_lora")
    tokenizer.save_pretrained("model/multitask_lora")
    
    # Save the run configuration for later inspection.
    config = {
        "task_weights": task_weights,
        "tasks": ["generation", "summarization", "completion", "syntax_check"],
        "train_samples": len(train_dataset),
        "epochs": args.epochs,
        "batch_size": args.batch_size,
        "optimizations": {
            "fused_adamw": True,
            "torch_compile": False,  # disabled for LoRA compatibility
            "num_workers": NUM_WORKERS,
            "prefetch_factor": PREFETCH_FACTOR,
            "parallel_tokenization": 8
        }
    }
    
    with open("model/multitask_lora/config.json", "w") as f:
        json.dump(config, f, indent=2)
    
    print("\n[成功] 多任务训练完成!")
    print(f"模型已保存至: model/multitask_lora")


def parse_none_or_int(value):
    """Argparse type: map the literal string 'none' (any case) to None,
    otherwise parse the value as an int."""
    return None if value.lower() == 'none' else int(value)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="多任务学习训练")
    parser.add_argument("--task_weights", type=str, default="1.0,0.5,0.4,0.3",
                        help="4 个任务的权重 (生成,摘要,补全,语法)")
    parser.add_argument("--epochs", type=int, default=4, help="训练轮数")
    parser.add_argument("--batch_size", type=int, default=BATCH_SIZE, help="批次大小")
    
    # Per-task sample-count controls ("--gen_samples None" means "use all").
    parser.add_argument("--gen_samples", type=parse_none_or_int, default=None, 
                        help="代码生成样本数 (使用 'None' 表示全部)")
    parser.add_argument("--summ_samples", type=int, default=5000, help="代码摘要样本数")
    parser.add_argument("--comp_samples", type=int, default=3000, help="代码补全样本数")
    parser.add_argument("--syntax_samples", type=int, default=3000, help="语法检查样本数")
    
    args = parser.parse_args()
    main(args)
