#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
对比学习预训练编码器初始化训练脚本

实验目标:
- 验证对比学习预训练对下游任务性能的提升
- 对比: 随机初始化 vs 对比学习初始化

使用方法:
    python train_with_contrastive_init.py --epochs 3
"""

import os
import sys
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, Trainer, TrainingArguments, DataCollatorForSeq2Seq
from peft import LoraConfig, get_peft_model
from datasets import load_dataset
import json

# 添加项目根目录到路径
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

def load_contrastive_encoder(encoder_path, model):
    """Load contrastively pretrained encoder weights into ``model.encoder``.

    Best-effort by design: when the checkpoint is missing or fails to load,
    the model is returned untouched (random initialization) so the training
    run can still proceed as a baseline.

    Args:
        encoder_path: Path to a ``torch.save``-d encoder ``state_dict``.
        model: Seq2seq model exposing an ``encoder`` submodule.

    Returns:
        The same ``model`` instance (modified in place on success).
    """
    print(f"\n[INFO] 从以下路径加载对比学习预训练编码器: {encoder_path}")

    if not os.path.exists(encoder_path):
        print(f"[WARN] 未找到编码器: {encoder_path}")
        print("[INFO] 将使用随机初始化")
        return model

    try:
        # Load onto CPU first; the Trainer moves the model to the target
        # device later.
        encoder_state = torch.load(encoder_path, map_location="cpu")

        # strict=False tolerates partial checkpoints, but silently ignoring
        # mismatched keys hides real problems (e.g. a checkpoint from a
        # different architecture) -- surface them explicitly.
        result = model.encoder.load_state_dict(encoder_state, strict=False)
        if result.missing_keys:
            print(f"[WARN] {len(result.missing_keys)} encoder keys missing from checkpoint (kept random init)")
        if result.unexpected_keys:
            print(f"[WARN] {len(result.unexpected_keys)} checkpoint keys ignored (no match in encoder)")

        print("[成功] 对比学习编码器加载成功!")
        print("[INFO] 编码器已使用从 226K 样本学习的语义表示初始化")

        return model
    except Exception as e:
        # Deliberate broad catch: any load failure falls back to random
        # init rather than aborting the experiment.
        print(f"[错误] 加载编码器失败: {e}")
        print("[INFO] 继续使用随机初始化")
        return model


def main():
    """Fine-tune CodeT5 with LoRA on a text-to-code dataset.

    Optionally warm-starts the encoder from contrastively pretrained
    weights (--use_contrastive); without the flag the run is the
    random-initialization baseline. Saves the LoRA adapter, tokenizer,
    and a JSON record of the run configuration to --output_dir.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--encoder_path", type=str, 
                       default="model/contrastive_pretrained/contrastive_encoder.pt",
                       help="对比学习预训练编码器路径")
    parser.add_argument("--output_dir", type=str,
                       default="model/text2code_lora_contrastive",
                       help="输出目录")
    parser.add_argument("--epochs", type=int, default=3, help="训练轮数")
    parser.add_argument("--batch_size", type=int, default=16, help="批次大小")
    parser.add_argument("--use_contrastive", action="store_true",
                       help="使用对比学习预训练编码器 (默认: False 为基线)")
    args = parser.parse_args()
    
    print("=" * 80)
    print("对比学习初始化训练")
    print("=" * 80)
    print(f"对比学习编码器: {args.encoder_path}")
    print(f"使用对比学习初始化: {args.use_contrastive}")
    print(f"输出目录: {args.output_dir}")
    print("")
    
    # Load the base model: prefer a local copy, fall back to the HF hub id.
    base_model_path = "models/Salesforce_codet5-base"
    if not os.path.exists(base_model_path):
        base_model_path = "Salesforce/codet5-base"
    
    print(f"[INFO] 加载基础模型: {base_model_path}")
    tokenizer = AutoTokenizer.from_pretrained(base_model_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(base_model_path)
    
    # Optionally warm-start the encoder with the contrastive checkpoint;
    # load failures fall back to random init inside the helper.
    if args.use_contrastive:
        model = load_contrastive_encoder(args.encoder_path, model)
    else:
        print("[INFO] 使用随机初始化 (基线)")
    
    # Apply LoRA adapters to the attention query/value projections
    # ("q"/"v" are the T5/CodeT5 module names).
    lora_config = LoraConfig(
        r=8,
        lora_alpha=32,
        target_modules=["q", "v"],
        lora_dropout=0.1,
        bias="none",
        task_type="SEQ_2_SEQ_LM"
    )
    model = get_peft_model(model, lora_config)
    
    # Report the trainable-parameter fraction (LoRA adapters only).
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    print(f"\n[模型] 可训练参数: {trainable:,} / {total:,} ({100*trainable/total:.2f}%)")
    
    # Load the training data (JSONL produced by the preprocessing script).
    print("\n[INFO] 加载训练数据集...")
    data_file = "data/processed/complex_train_text2code.jsonl"
    
    if not os.path.exists(data_file):
        print(f"[错误] 未找到数据文件: {data_file}")
        print("[提示] 请先运行数据预处理脚本")
        return
    
    # Use the datasets library to read the JSONL file.
    dataset = load_dataset('json', data_files={'train': data_file})
    train_dataset = dataset['train']
    
    # Hold out 10% as the validation split (fixed seed for reproducibility).
    split = train_dataset.train_test_split(test_size=0.1, seed=42)
    train_dataset = split['train']
    eval_dataset = split['test']
    
    print(f"[INFO] 训练集样本数: {len(train_dataset)}")
    print(f"[INFO] 验证集样本数: {len(eval_dataset)}")
    
    # Batched preprocessing callback for datasets.map.
    def preprocess_function(examples):
        """Tokenize (instruction, output) pairs into model inputs/labels."""
        # Input side: the natural-language instruction.
        inputs = examples['instruction']
        # Target side: the code string.
        targets = examples['output']
        
        # Tokenize inputs; padding is deferred to the collator so each
        # batch is padded dynamically instead of to a global max length.
        model_inputs = tokenizer(
            inputs,
            max_length=256,
            truncation=True,
            padding=False
        )
        
        # Tokenize targets the same way to produce the labels.
        # NOTE(review): tokenizing targets directly (no text_target= /
        # as_target_tokenizer) is fine for CodeT5's single tokenizer,
        # but worth confirming if the base model ever changes.
        labels = tokenizer(
            targets,
            max_length=256,
            truncation=True,
            padding=False
        )
        
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
    
    # Apply preprocessing; original columns are dropped so only tensor
    # fields reach the collator.
    print("\n[INFO] 预处理数据...")
    train_dataset = train_dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=train_dataset.column_names,
        desc="预处理训练集"
    )
    
    eval_dataset = eval_dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=eval_dataset.column_names,
        desc="预处理验证集"
    )
    
    # Collator pads inputs and labels per batch (label padding becomes
    # -100 so it is ignored by the loss).
    data_collator = DataCollatorForSeq2Seq(
        tokenizer=tokenizer,
        model=model,
        padding=True
    )
    
    # Training hyperparameters. Effective batch size is
    # batch_size * gradient_accumulation_steps (=2).
    # NOTE(review): `evaluation_strategy` was renamed `eval_strategy` in
    # newer transformers releases -- confirm against the pinned version.
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        learning_rate=1e-4,
        warmup_steps=500,
        logging_steps=100,
        save_steps=1000,
        eval_steps=1000,
        save_total_limit=2,
        fp16=torch.cuda.is_available(),
        evaluation_strategy="steps",
        load_best_model_at_end=True,
        report_to="none",
        gradient_accumulation_steps=2,
    )
    
    # Build the Trainer.
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    
    # Run training.
    print("\n" + "=" * 80)
    print(f"开始训练 ({'对比学习初始化' if args.use_contrastive else '随机初始化'})")
    print("=" * 80 + "\n")
    
    trainer.train()
    
    # Save the LoRA adapter weights and tokenizer.
    print("\n[INFO] 保存模型...")
    model.save_pretrained(args.output_dir)
    tokenizer.save_pretrained(args.output_dir)
    
    # Persist the run configuration so baseline/contrastive runs can be
    # compared later.
    config_info = {
        "use_contrastive": args.use_contrastive,
        "encoder_path": args.encoder_path if args.use_contrastive else None,
        "epochs": args.epochs,
        "batch_size": args.batch_size,
        "train_samples": len(train_dataset),
        "eval_samples": len(eval_dataset),
    }
    
    with open(os.path.join(args.output_dir, "training_config.json"), "w") as f:
        json.dump(config_info, f, indent=2)
    
    print(f"\n[成功] 训练完成!")
    print(f"[INFO] 模型已保存至: {args.output_dir}")
    print(f"[提示] 与基线对比结果 (不使用 --use_contrastive)")
    print(f"[提示] 评估命令: python evaluate_*.py --model_path {args.output_dir}")


# Script entry point: run training only when executed directly,
# not when imported.
if __name__ == "__main__":
    main()
