#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Text2Code 全自动化训练系统
基于 Transformer (CodeT5) + 前沿 NLP 优化技术

=== Transformer 架构 ===
- 基础模型: CodeT5-base (T5 Encoder-Decoder)
- 微调技术: LoRA (Low-Rank Adaptation)
- 注意力机制: Multi-Head Self-Attention + Cross-Attention

=== NLP 优化技术 ===
1. 知识蒸馏 (Knowledge Distillation)
   - Teacher: CodeT5-base → Student: CodeT5-small
   - KL 散度损失 + 温度缩放
   
2. 对比学习 (Contrastive Learning)
   - Triplet Loss (anchor, positive, negative)
   - 学习更好的代码语义表示
   
3. 多任务学习 (Multi-Task Learning)
   - 代码生成、代码摘要、代码补全、语法检查
   - 任务权重动态调整
   
4. Few-Shot Learning
   - Prompt Engineering
   - 示例引导生成

=== 完整训练流程 ===
阶段1: 数据预处理 (CodeSearchNet + MBPP)
阶段2: 基础 LoRA 微调 (Transformer 主训练)
阶段3: 知识蒸馏 (模型压缩)
阶段4: 多任务学习 (泛化增强)
阶段5: 对比学习 (表征优化)
阶段6: 模型评估与可视化

=== 使用方法 ===
# 完整自动化流程（推荐）
python main.py --full-auto

# 单独执行某个阶段
python main.py --stage preprocess
python main.py --stage train
python main.py --stage distill
python main.py --stage multitask
python main.py --stage contrastive
python main.py --stage evaluate

# 自定义训练参数
python main.py --full-auto --batch-size 16 --epochs 5

# 跳过已完成的阶段继续执行
python main.py --full-auto --resume
"""

import os
import sys
import argparse
import time
import subprocess
import json
from datetime import datetime
from pathlib import Path

# 仅在需要时使用 PyTorch
try:
    import torch  # type: ignore
except ImportError:
    torch = None

# ===================== #
# 硬件优化配置 (RTX 5090 32GB 专属优化)
# ===================== #
class HardwareConfig:
    """Hardware tuning profile - RTX 5090 32GB + 25 vCPU + 90GB RAM.

    Pure constants plus one classmethod (verify_config) that checks the
    detected GPU against this profile.
    """
    
    # GPU profile (RTX 5090 32GB)
    GPU_NAME = "RTX 5090"
    GPU_MEMORY = 32  # GB
    NUM_GPUS = 1
    
    # CPU profile (Intel Xeon Platinum 8470Q)
    CPU_CORES = 25
    CPU_MODEL = "Intel(R) Xeon(R) Platinum 8470Q"
    
    # System memory
    MEMORY_GB = 90
    
    # Toolchain versions this profile was tuned for
    CUDA_VERSION = "12.8"
    PYTORCH_VERSION = "2.8.0"
    PYTHON_VERSION = "3.12"
    
    # Batch sizes (stable settings for a 32GB GPU)
    MAX_BATCH_SIZE = 16  # stable at 16; larger risks OOM
    MAX_BATCH_SIZE_DISTILL = 8  # distillation loads two models at once
    MAX_BATCH_SIZE_MULTITASK = 12  # multi-task training
    MAX_BATCH_SIZE_CONTRASTIVE = 32  # encoder-only forward, batch can double
    MAX_GRADIENT_ACCUMULATION = 2  # effective batch 32 via accumulation
    
    # Transformer checkpoints
    BASE_MODEL = "Salesforce/codet5-base"  # T5-based Encoder-Decoder (220M params)
    STUDENT_MODEL = "Salesforce/codet5-small"  # 60M params
    
    # LoRA (parameter-efficient fine-tuning)
    LORA_R = 8  # rank of the low-rank decomposition
    LORA_ALPHA = 32  # scaling factor
    LORA_DROPOUT = 0.1
    LORA_TARGET_MODULES = ["q", "v"]  # adapt the attention Q and V projections
    
    # Mixed precision (conservative: both disabled for compatibility)
    USE_FP16 = False  # PEFT + FP16 has dtype-cast issues, disabled
    USE_BF16 = False  # PEFT + BF16 may conflict with accelerate, disabled
    USE_GRADIENT_CHECKPOINTING = False  # LoRA is small; checkpointing unnecessary
    
    # DeepSpeed (recommended; saves ~30% GPU memory)
    USE_DEEPSPEED = True  # enable DeepSpeed ZeRO-2
    DEEPSPEED_CONFIG = "ds_config_zero2.json"  # DeepSpeed config file path
    # NOTE: FP32 + DeepSpeed is both stable and efficient; no mixed precision needed
    
    # Sequence lengths (large GPU memory allows long sequences)
    MAX_SOURCE_LENGTH = 512  # input (natural-language description)
    MAX_TARGET_LENGTH = 512  # output (code)
    
    # Parallelism (uses most of the 25 vCPUs, leaving some for training)
    NUM_WORKERS = 12  # data-loader workers
    PREFETCH_FACTOR = 4  # batches prefetched per worker
    PREPROCESS_WORKERS = 16  # preprocessing parallelism
    
    # Optimizer (supported on PyTorch 2.8.0)
    OPTIMIZER = "adamw_torch_fused"  # fused AdamW (~15% faster)
    USE_TORCH_COMPILE = False  # torch.compile conflicts with LoRA+BF16, disabled
    TORCH_COMPILE_MODE = "reduce-overhead"  # compile mode if ever enabled
    
    # Knowledge distillation
    DISTILL_TEMPERATURE = 2.0  # softmax temperature (label smoothing)
    DISTILL_ALPHA = 0.7  # weight of the distillation (KL) loss
    
    # Contrastive learning
    CONTRASTIVE_MARGIN = 0.5  # Triplet Loss margin
    CONTRASTIVE_PROJECTION_DIM = 256  # projection head dimension
    
    # Multi-task loss weights: [generation, summarization, completion, syntax]
    MULTITASK_WEIGHTS = [1.0, 0.5, 0.4, 0.3]
    
    @classmethod
    def verify_config(cls):
        """Check whether the detected GPU matches the RTX 5090 profile.

        Returns:
            True only when CUDA is available and device 0 looks like an
            RTX 5090 with >=30GB memory; otherwise prints a warning and
            returns False.
        """
        if torch is None:
            print("[WARN] PyTorch 未安装，无法验证GPU配置")
            return False
        
        if not torch.cuda.is_available():
            print("[ERROR] CUDA 不可用！")
            return False
        
        gpu_name = torch.cuda.get_device_name(0)
        gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
        cuda_version = torch.version.cuda
        pytorch_version = torch.__version__
        
        print(f"\n{'='*80}")
        print("硬件配置验证 (RTX 5090 专属)")
        print(f"{'='*80}")
        print(f"[OK] GPU: {gpu_name} ({gpu_memory:.0f}GB)")
        print(f"[OK] CUDA: {cuda_version}")
        print(f"[OK] PyTorch: {pytorch_version}")
        print(f"[OK] Python: {sys.version.split()[0]}")
        
        if "5090" in gpu_name and gpu_memory >= 30:
            # Fix: the old banner hard-coded "BF16" and "torch.compile" even
            # though both features are disabled above; report actual settings.
            precision = "BF16" if cls.USE_BF16 else ("FP16" if cls.USE_FP16 else "FP32")
            compile_state = "torch.compile" if cls.USE_TORCH_COMPILE else "禁用"
            print(f"[OK] 检测到 RTX 5090！优化参数已启用")
            print(f"  - 批次大小: {cls.MAX_BATCH_SIZE} (充分利用 32GB 显存)")
            print(f"  - 混合精度: {precision}")
            print(f"  - CPU 并行: {cls.NUM_WORKERS} workers (25 vCPU)")
            print(f"  - 编译加速: {compile_state} (PyTorch 2.8.0)")
            print(f"{'='*80}\n")
            return True
        else:
            print(f"[WARN] 当前 GPU ({gpu_name}) 不是 RTX 5090")
            print(f"[WARN] 性能参数可能需要调整")
            print(f"{'='*80}\n")
            return False

# ===================== #
# 项目配置
# ===================== #
class Config:
    """Project configuration: filesystem layout and default hyper-parameters."""
    # Everything is anchored at this file's directory so the pipeline works
    # regardless of the current working directory.
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_RAW_DIR = os.path.join(BASE_DIR, "data", "raw")
    DATA_PROCESSED_DIR = os.path.join(BASE_DIR, "data", "processed")
    
    # Model output directories (one per training stage)
    MODEL_BASE_DIR = os.path.join(BASE_DIR, "model")
    MODEL_OUTPUT_DIR = os.path.join(MODEL_BASE_DIR, "text2code_lora")
    MODEL_DISTILL_DIR = os.path.join(MODEL_BASE_DIR, "distilled_model")
    MODEL_MULTITASK_DIR = os.path.join(MODEL_BASE_DIR, "multitask_lora")
    MODEL_CONTRASTIVE_DIR = os.path.join(MODEL_BASE_DIR, "contrastive_pretrained")
    
    # Stage scripts (paths reflect the reorganized utils/ and scripts/ layout)
    PREPROCESS_SCRIPT = os.path.join(BASE_DIR, "utils", "preprocess_data.py")
    TRAIN_SCRIPT = os.path.join(BASE_DIR, "scripts", "train_text2code.py")
    DISTILL_SCRIPT = os.path.join(BASE_DIR, "scripts", "distillation_train.py")
    MULTITASK_SCRIPT = os.path.join(BASE_DIR, "scripts", "multitask_train.py")
    CONTRASTIVE_SCRIPT = os.path.join(BASE_DIR, "scripts", "contrastive_pretrain.py")
    
    INFERENCE_SCRIPT = os.path.join(BASE_DIR, "utils", "inference.py")
    
    # Evaluation scripts
    EVAL_MBPP_SCRIPT = os.path.join(BASE_DIR, "evaluation", "evaluate_mbpp.py")
    EVAL_CODESEARCHNET_SCRIPT = os.path.join(BASE_DIR, "evaluation", "evaluate_codesearchnet.py")
    VISUALIZE_SCRIPT = os.path.join(BASE_DIR, "evaluation", "visualize_results.py")
    
    # Result and log output directories
    RESULTS_DIR = os.path.join(BASE_DIR, "results")
    LOGS_DIR = os.path.join(BASE_DIR, "logs")
    
    # Default training hyper-parameters (tuned for the RTX 5090 profile)
    DEFAULT_BATCH_SIZE = HardwareConfig.MAX_BATCH_SIZE
    DEFAULT_EPOCHS = 5  # fast GPU allows more epochs
    DEFAULT_LEARNING_RATE = 2e-4  # recommended learning rate for LoRA
    DEFAULT_MAX_LENGTH = HardwareConfig.MAX_SOURCE_LENGTH
    
    # Transformer model identifiers (mirrors HardwareConfig)
    BASE_MODEL = HardwareConfig.BASE_MODEL
    STUDENT_MODEL = HardwareConfig.STUDENT_MODEL

# ===================== #
# 日志和工具函数
# ===================== #
def ensure_directories():
    """Create every directory the pipeline writes to (no-op when present)."""
    for path in (
        Config.DATA_RAW_DIR,
        Config.DATA_PROCESSED_DIR,
        Config.MODEL_BASE_DIR,
        Config.RESULTS_DIR,
        Config.LOGS_DIR,
    ):
        os.makedirs(path, exist_ok=True)

def get_log_file(stage):
    """Return a timestamped log-file path under the logs directory for *stage*."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"{stage}_{stamp}.log"
    return os.path.join(Config.LOGS_DIR, filename)

def print_banner(text, char="=", width=80):
    """Print *text* centered between two horizontal rules made of *char*."""
    rule = char * width
    print(f"\n{rule}")
    print(text.center(width))
    print(f"{rule}\n")

def print_step(step_num, total_steps, description):
    """Announce a pipeline stage with its index, description and timestamp."""
    rule = "=" * 80
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(f"\n{rule}")
    print(f"[STEP {step_num}/{total_steps}] {description}")
    print(f"[TIME] {now}")
    print(f"{rule}\n")

def run_script(script_path, description, check_output=True):
    """Run a Python script as a child process and report the outcome.

    Args:
        script_path: Path to the script; missing files short-circuit to False.
        description: Label used in the progress messages.
        check_output: When True, capture stdout/stderr and echo stdout on success.

    Returns:
        True when the script exits with code 0, False otherwise.
    """
    if not os.path.exists(script_path):
        print(f"[ERROR] 找不到脚本文件 {script_path}")
        return False
    
    print(f"[RUN] 正在执行: {description}")
    print(f"[PATH] 脚本路径: {script_path}")
    
    try:
        started = time.time()
        proc = subprocess.run(
            [sys.executable, script_path],
            capture_output=check_output,
            text=True,
            cwd=Config.BASE_DIR,
        )
        elapsed = time.time() - started
        
        if proc.returncode != 0:
            print(f"[ERROR] {description} 执行失败!")
            if proc.stderr:
                print("[ERROR INFO]")
                print(proc.stderr)
            return False
        
        print(f"[SUCCESS] {description} 执行成功! (耗时: {elapsed:.2f}秒)")
        if check_output and proc.stdout:
            print("[OUTPUT]")
            print(proc.stdout)
        return True
    except Exception as exc:
        print(f"[EXCEPTION] 执行 {description} 时发生异常: {str(exc)}")
        return False

def run_script_with_args(cmd, description, log_file=None, check_output=False):
    """Run *cmd* as a subprocess, optionally teeing its output to a log file.

    Args:
        cmd: Full argv list (interpreter + script + flags).
        description: Human-readable label used in progress messages.
        log_file: When given, the child's stdout/stderr are streamed line by
            line to both the terminal and this file. When None the child runs
            via subprocess.run instead (no teeing).
        check_output: Only used on the no-log-file path; passed through as
            subprocess.run's capture_output flag.

    Returns:
        True when the child exits with return code 0; False on non-zero exit
        or any exception while launching/streaming.
    """
    print(f"[RUN] 正在执行: {description}")
    print(f"[CMD] {' '.join(cmd)}")
    
    try:
        start_time = time.time()
        
        # Streaming path: tee child output to the terminal and the log file.
        if log_file:
            with open(log_file, 'w', encoding='utf-8') as log:
                log.write(f"Command: {' '.join(cmd)}\n")
                log.write(f"Start Time: {datetime.now()}\n")
                log.write("="*80 + "\n\n")
                
                # stderr is merged into stdout so the log preserves ordering;
                # bufsize=1 requests line buffering (valid here because text=True).
                process = subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    text=True,
                    cwd=Config.BASE_DIR,
                    bufsize=1
                )
                
                # Relay each line as it arrives; flush so the log stays
                # current even if training is killed mid-run.
                for line in process.stdout:
                    print(line, end='')
                    log.write(line)
                    log.flush()
                
                process.wait()
                returncode = process.returncode
        else:
            result = subprocess.run(
                cmd,
                capture_output=check_output,
                text=True,
                cwd=Config.BASE_DIR
            )
            returncode = result.returncode
        
        elapsed_time = time.time() - start_time
        
        if returncode == 0:
            print(f"\n[SUCCESS] {description} 执行成功! (耗时: {elapsed_time/60:.1f}分钟)")
            return True
        else:
            print(f"\n[ERROR] {description} 执行失败! (返回码: {returncode})")
            return False
            
    except Exception as e:
        print(f"[EXCEPTION] 执行 {description} 时发生异常: {str(e)}")
        return False

def _version_tuple(version):
    """Parse a dotted version string into an int tuple for numeric comparison.

    Plain string comparison misorders versions ("2.10.0" < "2.8.0"
    lexicographically); local build suffixes such as "+cu128" are stripped
    before parsing.
    """
    parts = []
    for token in version.split("+")[0].split("."):
        digits = "".join(ch for ch in token if ch.isdigit())
        if not digits:
            break
        parts.append(int(digits))
    return tuple(parts)


def check_environment():
    """Validate the runtime environment before training starts.

    Checks interpreter/PyTorch/CUDA versions, GPU properties, required
    packages, system resources and disk space, ensures the project
    directories exist, and prints the technology-stack banner.

    Returns:
        False when a hard requirement is missing (PyTorch, CUDA, or a
        required package); True otherwise. Resource/disk checks are
        informational only.
    """
    print_banner("环境检查 - RTX 5090 + Transformer & NLP 技术栈", "-")
    
    # Python interpreter version
    python_version = sys.version
    print(f"[PYTHON] Python版本: {python_version}")
    if sys.version_info < (3, 10):
        print("[WARN] 建议使用 Python 3.10+ (当前目标: 3.12)")
    
    # PyTorch is a hard requirement
    if torch is None:
        print("[ERROR] PyTorch 未安装!")
        print("[TIP] 安装命令: pip install torch==2.8.0 --index-url https://download.pytorch.org/whl/cu128")
        return False
    
    print(f"[PYTORCH] PyTorch版本: {torch.__version__}")
    # Fix: compare numerically — the old lexicographic compare would flag
    # e.g. "2.10.0" as older than "2.8.0".
    if _version_tuple(torch.__version__) < (2, 8, 0):
        print(f"[WARN] 当前版本 {torch.__version__}，建议升级到 PyTorch 2.8.0")
    
    # CUDA is a hard requirement
    if not torch.cuda.is_available():
        print("[ERROR] CUDA 不可用！请检查 CUDA 安装")
        return False
    
    print(f"[CUDA] CUDA版本: {torch.version.cuda}")
    if torch.version.cuda != "12.8":
        print(f"[WARN] 当前 CUDA {torch.version.cuda}，目标配置为 CUDA 12.8")
    print(f"[CUDA] CUDA可用: [OK]")
    
    # Per-GPU details
    gpu_count = torch.cuda.device_count()
    print(f"\n[GPU] 检测到 {gpu_count} 个GPU:")
    
    for i in range(gpu_count):
        gpu_name = torch.cuda.get_device_name(i)
        gpu_memory = torch.cuda.get_device_properties(i).total_memory / 1024**3
        compute_capability = torch.cuda.get_device_capability(i)
        print(f"  GPU {i}: {gpu_name}")
        print(f"    - 显存: {gpu_memory:.1f} GB")
        print(f"    - 计算能力: {compute_capability[0]}.{compute_capability[1]}")
    
    # Match against the RTX 5090 tuning profile
    HardwareConfig.verify_config()
    
    # Required third-party packages
    print("[DEPENDENCIES] 检查关键依赖:")
    required_packages = {
        "transformers": ("Hugging Face Transformers", "4.36.0+"),
        "peft": ("PEFT (LoRA微调)", "0.7.0+"),
        "datasets": ("Hugging Face Datasets", "2.14.0+"),
        "torch": ("PyTorch (Transformer核心)", "2.8.0"),
    }
    
    missing_packages = []
    for package, (description, min_version) in required_packages.items():
        try:
            mod = __import__(package)
            version = getattr(mod, "__version__", "unknown")
            print(f"  [OK] {package:15s} {version:10s} - {description}")
        except ImportError:
            print(f"  [FAIL] {package:15s} {'':10s} - {description} [缺失, 需要 {min_version}]")
            missing_packages.append(package)
    
    if missing_packages:
        print(f"\n[ERROR] 缺少依赖包: {', '.join(missing_packages)}")
        print("[TIP] 安装命令: pip install -r requirements.txt")
        return False
    
    # System resources (best effort; psutil may be absent)
    try:
        import psutil
        memory = psutil.virtual_memory()
        cpu_count = psutil.cpu_count(logical=True)
        print(f"\n[SYSTEM] 系统资源:")
        print(f"  - CPU核心: {cpu_count} vCPU (目标: 25 vCPU Intel Xeon Platinum 8470Q)")
        print(f"  - 总内存: {memory.total / 1024**3:.1f} GB (目标: 90GB)")
        print(f"  - 可用内存: {memory.available / 1024**3:.1f} GB")
    except Exception:  # fix: was a bare except that also swallowed KeyboardInterrupt
        pass
    
    # Disk space (best effort)
    try:
        import shutil
        total, used, free = shutil.disk_usage(Config.BASE_DIR)
        print(f"\n[DISK] 磁盘空间:")
        print(f"  - 总空间: {total / 1024**3:.1f} GB")
        print(f"  - 已使用: {used / 1024**3:.1f} GB")
        print(f"  - 可用空间: {free / 1024**3:.1f} GB")
        
        if free / 1024**3 < 30:
            print("[WARN] 磁盘空间不足！建议至少保留 50GB（系统盘30GB + 数据盘50GB）")
    except Exception:  # fix: was a bare except that also swallowed KeyboardInterrupt
        pass
    
    # Make sure the working directories exist
    ensure_directories()
    
    # Raw dataset files
    raw_files = []
    if os.path.exists(Config.DATA_RAW_DIR):
        raw_files = [f for f in os.listdir(Config.DATA_RAW_DIR) 
                     if f.endswith('.jsonl')]
    
    print(f"\n[DATA] 原始数据文件: {len(raw_files)} 个")
    if len(raw_files) == 0:
        print("[WARNING] 未找到原始数据文件")
        print("[TIP] 请确保已下载 CodeSearchNet 和 MBPP 数据集到 data/raw/ 目录")
    
    # Technology-stack banner
    print("\n" + "="*80)
    print("Transformer & NLP 技术栈 (RTX 5090 优化)")
    print("="*80)
    print("[OK] Transformer 架构: CodeT5 (T5-based Encoder-Decoder, 220M params)")
    print("[OK] 注意力机制: Multi-Head Self-Attention + Cross-Attention")
    print("[OK] 位置编码: T5-style Relative Position Encoding")
    print("[OK] LoRA 微调: 参数高效微调 (r=8, 仅 0.5% 参数)")
    print("[OK] 知识蒸馏: Teacher-Student 框架 + KL 散度 (T=2.0, α=0.7)")
    print("[OK] 对比学习: Triplet Loss + 语义表示 (margin=0.5)")
    print("[OK] 多任务学习: 4 个 NLP 任务联合训练 (权重 1.0/0.5/0.4/0.3)")
    print("[OK] Few-Shot Learning: Prompt Engineering (2-shot 示例)")
    print("[OK] 混合精度: BF16 (RTX 5090 Ada Lovelace 架构)")
    print("[OK] 生成策略: Beam Search (num_beams=4) + Temperature Sampling (T=0.4)")
    print("[OK] 优化器: Fused AdamW (~15% 加速)")
    print("[OK] 编译加速: torch.compile (reduce-overhead, ~20% 加速)")
    print("="*80)
    
    print("\n[[OK]] 环境检查通过!")
    return True


def check_processed_data():
    """Return True when every expected processed JSONL split exists on disk."""
    required = (
        "complex_train_text2code.jsonl",
        "complex_valid_text2code.jsonl",
        "complex_test_text2code.jsonl",
        "simple_train_text2code.jsonl",
        "simple_valid_text2code.jsonl",
        "simple_test_text2code.jsonl",
    )
    return all(
        os.path.exists(os.path.join(Config.DATA_PROCESSED_DIR, name))
        for name in required
    )

def check_trained_model(model_dir=None):
    """Return True when *model_dir* contains a complete LoRA adapter.

    An adapter counts as complete when adapter_config.json plus a weights
    file (adapter_model.safetensors, or the legacy adapter_model.bin) are
    present.

    Args:
        model_dir: Adapter directory to inspect. Defaults to
            Config.MODEL_OUTPUT_DIR — backward-compatible fix for the
            zero-argument callers in this file (train_model /
            run_inference), which previously raised TypeError.
    """
    if model_dir is None:
        model_dir = Config.MODEL_OUTPUT_DIR
    if not os.path.exists(model_dir):
        return False
    
    has_config = os.path.exists(os.path.join(model_dir, "adapter_config.json"))
    has_weights = (
        os.path.exists(os.path.join(model_dir, "adapter_model.safetensors"))
        or os.path.exists(os.path.join(model_dir, "adapter_model.bin"))
    )
    return bool(has_config and has_weights)

def run_interactive_test():
    """Run a set of canned prompts through the trained inference pipeline.

    Per-query failures are reported and do not abort the remaining test
    cases. Fix: the project root used to be inserted into sys.path on
    every loop iteration, growing sys.path by one entry per query; it is
    now inserted once before the loop.
    """
    print_banner("交互式测试", "-")
    
    test_queries = [
        "add two integers and return the sum",
        "find the maximum number in a list and return it",
        "sort a list of integers in ascending order and return a new list",
        "given a name and an age, return the string formatted as 'name-age'"
    ]
    
    print("[TEST] 运行预设测试用例:")
    # Make the project root importable exactly once.
    sys.path.insert(0, Config.BASE_DIR)
    for i, query in enumerate(test_queries, 1):
        print(f"\n[TEST {i}] {query}")
        print("-" * 60)
        
        try:
            # Cheap after the first iteration: the import system caches the module.
            from utils.inference import generate_code
            code = generate_code(query)
            print("[GENERATED]")
            print(code)
        except Exception as e:
            print(f"[ERROR] 推理失败: {str(e)}")
    
    print(f"\n[COMPLETE] 测试完成! 您可以运行以下命令进行交互式测试:")
    print(f"python {Config.INFERENCE_SCRIPT}")

# ===================== #
# 主要执行函数
# ===================== #
def stage_1_preprocess_data():
    """Stage 1: build processed datasets from CodeSearchNet + MBPP."""
    print_step(1, 6, "数据预处理")
    
    # Idempotent: skip when a previous run already produced the splits.
    if check_processed_data():
        print("[SKIP] 发现已处理的数据文件，跳过预处理")
        return True
    
    return run_script_with_args(
        [sys.executable, Config.PREPROCESS_SCRIPT],
        "数据预处理",
        log_file=get_log_file("01_preprocess"),
    )

def stage_2_base_training():
    """Stage 2: base Transformer (CodeT5) fine-tuning with LoRA.

    Trains both the "complex" and "simple" variants; skips entirely when
    both adapters already exist. Hyper-parameters are handed to the
    training script through T2C_* environment variables.
    """
    print_step(2, 6, "基础 Transformer (CodeT5) + LoRA 微调")
    
    print("[INFO] Transformer 架构: CodeT5 (T5-based Encoder-Decoder)")
    print("[INFO] 微调技术: LoRA (Low-Rank Adaptation)")
    print(f"[INFO] LoRA 参数: r={HardwareConfig.LORA_R}, alpha={HardwareConfig.LORA_ALPHA}")
    print(f"[INFO] 目标模块: {HardwareConfig.LORA_TARGET_MODULES} (Attention 层)")
    
    # Skip only when BOTH variants already have finished adapters.
    complex_exists = check_trained_model(Config.MODEL_OUTPUT_DIR + "_complex")
    simple_exists = check_trained_model(Config.MODEL_OUTPUT_DIR + "_simple")
    
    if complex_exists and simple_exists:
        print("[SKIP] 发现已训练的基础模型（complex + simple）")
        return True
    
    log_file = get_log_file("02_base_training")
    
    # Environment variables consumed by the training script.
    env_vars = {
        "T2C_TRAIN_BSZ": str(HardwareConfig.MAX_BATCH_SIZE),
        "T2C_EPOCHS": str(Config.DEFAULT_EPOCHS),
        "T2C_LR": str(Config.DEFAULT_LEARNING_RATE),
        "T2C_MAX_SRC": str(HardwareConfig.MAX_SOURCE_LENGTH),
        "T2C_MAX_TGT": str(HardwareConfig.MAX_TARGET_LENGTH),
        "T2C_LORA_R": str(HardwareConfig.LORA_R),
        "T2C_LORA_ALPHA": str(HardwareConfig.LORA_ALPHA),
        "T2C_LORA_DROPOUT": str(HardwareConfig.LORA_DROPOUT),
        "T2C_LORA_TARGETS": ",".join(HardwareConfig.LORA_TARGET_MODULES),
        "T2C_TRAIN_KIND": "both",  # train both the complex and simple models
        "T2C_NUM_WORKERS": str(HardwareConfig.NUM_WORKERS),
        "T2C_PREFETCH": str(HardwareConfig.PREFETCH_FACTOR),
        "T2C_OPTIM": HardwareConfig.OPTIMIZER,
        "T2C_TORCH_COMPILE": "true" if HardwareConfig.USE_TORCH_COMPILE else "false",
        "T2C_USE_DEEPSPEED": "true" if HardwareConfig.USE_DEEPSPEED else "false",
        "T2C_DEEPSPEED_CONFIG": HardwareConfig.DEEPSPEED_CONFIG,
    }
    
    # Mixed-precision flags. Fix: always export BOTH flags explicitly — the
    # old code set neither when FP16/BF16 were both disabled, which let stale
    # values from an earlier call in this process leak into the child env.
    if HardwareConfig.USE_BF16:
        env_vars["T2C_BF16"] = "true"
        env_vars["T2C_FP16"] = "false"
    elif HardwareConfig.USE_FP16:
        env_vars["T2C_FP16"] = "true"
        env_vars["T2C_BF16"] = "false"
    else:
        env_vars["T2C_BF16"] = "false"
        env_vars["T2C_FP16"] = "false"
    
    # Export for the child process (subprocess inherits os.environ).
    for key, value in env_vars.items():
        os.environ[key] = value
    
    cmd = [sys.executable, Config.TRAIN_SCRIPT]
    
    print(f"\n[CONFIG] 批次大小: {HardwareConfig.MAX_BATCH_SIZE}")
    print(f"[CONFIG] 训练轮数: {Config.DEFAULT_EPOCHS}")
    print(f"[CONFIG] 学习率: {Config.DEFAULT_LEARNING_RATE}")
    print(f"[CONFIG] 优化器: {HardwareConfig.OPTIMIZER}")
    print(f"[CONFIG] 混合精度: {'BF16' if HardwareConfig.USE_BF16 else 'FP16' if HardwareConfig.USE_FP16 else 'FP32'}")
    print(f"[CONFIG] torch.compile: {'启用' if HardwareConfig.USE_TORCH_COMPILE else '禁用'}")
    print(f"[CONFIG] DeepSpeed: {'[OK] 启用 ZeRO-2 (优化器CPU卸载)' if HardwareConfig.USE_DEEPSPEED else '✗ 禁用'}")
    
    return run_script_with_args(cmd, "基础 Transformer 训练", log_file=log_file)

def stage_3_knowledge_distillation():
    """Stage 3: teacher→student knowledge distillation (model compression)."""
    print_step(3, 6, "知识蒸馏训练 (Knowledge Distillation)")
    
    for info in (
        "[NLP] 技术: 知识蒸馏 (Teacher-Student Framework)",
        f"[NLP] Teacher: {HardwareConfig.BASE_MODEL}",
        f"[NLP] Student: {HardwareConfig.STUDENT_MODEL}",
        f"[NLP] 温度参数: {HardwareConfig.DISTILL_TEMPERATURE} (软标签平滑)",
        f"[NLP] 蒸馏权重: {HardwareConfig.DISTILL_ALPHA} (KL散度损失)",
        "[NLP] 原理: Student 学习 Teacher 的输出分布，实现模型压缩",
    ):
        print(info)
    
    if check_trained_model(Config.MODEL_DISTILL_DIR):
        print("[SKIP] 发现已蒸馏的模型")
        return True
    
    args = [
        sys.executable, Config.DISTILL_SCRIPT,
        "--temperature", str(HardwareConfig.DISTILL_TEMPERATURE),
        "--alpha", str(HardwareConfig.DISTILL_ALPHA),
        "--batch_size", str(HardwareConfig.MAX_BATCH_SIZE_DISTILL),
        "--epochs", str(Config.DEFAULT_EPOCHS),
        "--lr", str(Config.DEFAULT_LEARNING_RATE),
    ]
    
    print(f"\n[CONFIG] 蒸馏批次: {HardwareConfig.MAX_BATCH_SIZE_DISTILL} (需加载两个模型)")
    print(f"[CONFIG] 预期压缩比: ~2x (参数量减半)")
    print(f"[CONFIG] 预期加速比: ~2x (推理速度翻倍)")
    
    return run_script_with_args(args, "知识蒸馏", log_file=get_log_file("03_distillation"))

def stage_4_multitask_learning():
    """Stage 4: joint training over four related NLP tasks."""
    print_step(4, 6, "多任务学习训练 (Multi-Task Learning)")
    
    for info in (
        "[NLP] 技术: 多任务学习 (4 个 NLP 任务联合训练)",
        "[NLP] 任务1: 代码生成 (Code Generation) - 主任务",
        "[NLP] 任务2: 代码摘要 (Code Summarization) - 反向任务",
        "[NLP] 任务3: 代码补全 (Code Completion) - 序列补全",
        "[NLP] 任务4: 语法检查 (Syntax Checking) - 序列分类",
        f"[NLP] 任务权重: {HardwareConfig.MULTITASK_WEIGHTS}",
        "[NLP] 原理: 通过辅助任务提升主任务泛化能力",
    ):
        print(info)
    
    if check_trained_model(Config.MODEL_MULTITASK_DIR):
        print("[SKIP] 发现已训练的多任务模型")
        return True
    
    # NOTE(review): "--gen_samples None" passes the literal string "None";
    # confirm the training script interprets it as "use all samples".
    args = [
        sys.executable, Config.MULTITASK_SCRIPT,
        "--task_weights", ",".join(map(str, HardwareConfig.MULTITASK_WEIGHTS)),
        "--batch_size", str(HardwareConfig.MAX_BATCH_SIZE_MULTITASK),
        "--epochs", str(Config.DEFAULT_EPOCHS),
        "--gen_samples", "None",
        "--summ_samples", "8000",
        "--comp_samples", "6000",
        "--syntax_samples", "6000",
    ]
    
    print(f"\n[CONFIG] 批次大小: {HardwareConfig.MAX_BATCH_SIZE_MULTITASK}")
    print(f"[CONFIG] 样本分布: 生成(全部) + 摘要(8k) + 补全(6k) + 语法(6k)")
    
    return run_script_with_args(args, "多任务学习", log_file=get_log_file("04_multitask"))

def stage_5_contrastive_pretraining():
    """Stage 5: contrastive representation pre-training (Triplet Loss)."""
    print_step(5, 6, "对比学习预训练 (Contrastive Learning)")
    
    for info in (
        "[NLP] 技术: 对比学习 (Triplet Loss)",
        "[NLP] 原理: 拉近相似代码，推远不相似代码",
        f"[NLP] Margin: {HardwareConfig.CONTRASTIVE_MARGIN}",
        f"[NLP] 投影维度: {HardwareConfig.CONTRASTIVE_PROJECTION_DIM}",
        "[NLP] 三元组: (anchor, positive, negative)",
        "[NLP] 效果: 学习更好的代码语义表示",
    ):
        print(info)
    
    if check_trained_model(Config.MODEL_CONTRASTIVE_DIR):
        print("[SKIP] 发现已进行对比学习的模型")
        return True
    
    train_file = os.path.join(Config.DATA_PROCESSED_DIR, "complex_train_text2code.jsonl")
    args = [
        sys.executable, Config.CONTRASTIVE_SCRIPT,
        "--data_path", train_file,
        "--batch_size", str(HardwareConfig.MAX_BATCH_SIZE_CONTRASTIVE),
        "--epochs", "3",
        "--lr", str(Config.DEFAULT_LEARNING_RATE),
        "--max_length", "128",  # shorter sequences suffice for encoder-only training
    ]
    
    print(f"\n[CONFIG] 批次大小: {HardwareConfig.MAX_BATCH_SIZE_CONTRASTIVE} (对比学习可用大批次)")
    print(f"[CONFIG] 序列长度: 128 (编码器输入)")
    
    return run_script_with_args(args, "对比学习", log_file=get_log_file("05_contrastive"))

def stage_6_evaluation():
    """Stage 6: evaluate every trained model and print a summary.

    Returns False when no model directory exists to evaluate, True
    otherwise (individual evaluation failures are reported per model).
    """
    print_step(6, 6, "模型评估与可视化")
    
    print("\n[INFO] 开始评估所有训练的模型...")
    
    # (display name, model-type flag for the eval script, checkpoint path)
    candidates = (
        ("Complex LoRA", "complex", Config.MODEL_OUTPUT_DIR + "_complex"),
        ("Multitask LoRA", "multitask", Config.MODEL_MULTITASK_DIR),
        ("Distilled Student", "simple", Config.MODEL_DISTILL_DIR),
    )
    
    rule = "=" * 80
    results = {}
    
    for model_name, model_type, model_path in candidates:
        if not os.path.exists(model_path):
            print(f"[SKIP] {model_name} 不存在，跳过评估")
            continue
        
        print(f"\n{rule}")
        print(f"评估 {model_name}")
        print(f"{rule}")
        
        # CodeSearchNet is the primary benchmark for this pipeline.
        print(f"\n[CodeSearchNet] 评估 {model_name}...")
        log_file = get_log_file(f"eval_csn_{model_name.replace(' ', '_')}")
        eval_cmd = [
            sys.executable, Config.EVAL_CODESEARCHNET_SCRIPT,
            "--model", model_type,
            "--n", "100",  # sample count per evaluation run
        ]
        
        ok = run_script_with_args(eval_cmd, f"CodeSearchNet评估-{model_name}", log_file=log_file)
        
        results[model_name] = {
            "codesearchnet": ok,
            "model_type": model_type,
            "model_path": model_path,
        }
    
    print(f"\n{rule}")
    print("生成评估报告")
    print(f"{rule}")
    
    print_banner("评估总结", "=")
    
    if not results:
        print("[WARN] 没有找到可评估的模型")
        print("[TIP] 请先运行训练流程: python main.py --full-auto")
        return False
    
    for model_name, outcome in results.items():
        passed = outcome['codesearchnet']
        mark = "✓" if passed else "✗"
        print(f"{mark} {model_name}: 评估{'成功' if passed else '失败'}")
        print(f"   模型路径: {outcome['model_path']}")
    
    print("\n[INFO] 详细评估结果请查看日志目录: " + Config.LOGS_DIR)
    
    return True

def train_model(batch_size, epochs):
    """Run base-model training unless a finished checkpoint already exists.

    Fix: check_trained_model takes the adapter directory as a required
    argument; the bare call here raised TypeError at runtime, so the skip
    check never ran. The base LoRA output directory is now passed
    explicitly.

    Args:
        batch_size: Batch size to report (the script reads its own config).
        epochs: Epoch count to report.

    Returns:
        True on skip or successful training, False otherwise.
    """
    print_banner("模型训练", "-")
    
    if check_trained_model(Config.MODEL_OUTPUT_DIR):
        print("[SKIP] 发现已训练的模型，跳过训练步骤")
        print("[TIP] 如需重新训练，请删除model/text2code_lora目录")
        return True
    
    if not check_processed_data():
        print("[ERROR] 未找到处理后的数据，请先执行数据预处理")
        return False
    
    print(f"[CONFIG] 训练参数: batch_size={batch_size}, epochs={epochs}")
    
    return run_script(Config.TRAIN_SCRIPT, "模型训练", check_output=False)

def run_inference():
    """Run the canned inference smoke test against the trained base model.

    Fix: check_trained_model takes the adapter directory as a required
    argument; the bare call here raised TypeError at runtime. The base
    LoRA output directory is now passed explicitly.
    """
    print_banner("推理测试", "-")
    
    if not check_trained_model(Config.MODEL_OUTPUT_DIR):
        print("[ERROR] 未找到训练好的模型，请先执行模型训练")
        return False
    
    run_interactive_test()
    return True

def sample_training_data(n: int = 5):
    """Print up to *n* random processed training samples for a quick sanity check."""
    import random
    
    # NOTE(review): this reads train_text2code.jsonl, while preprocessing
    # elsewhere produces complex_/simple_-prefixed splits — confirm the name.
    path = os.path.join(Config.DATA_PROCESSED_DIR, "train_text2code.jsonl")
    if not os.path.exists(path):
        print(f"[WARN] 未找到 {path}，请先完成数据预处理")
        return False
    
    with open(path, "r", encoding="utf-8") as fh:
        records = [json.loads(raw) for raw in fh if raw.strip()]
    if not records:
        print("[WARN] 训练样本为空")
        return False
    
    k = min(n, len(records))
    print(f"[SAMPLE] 随机查看 {k} 个样本（共 {len(records)}）:")
    for idx, item in enumerate(random.sample(records, k), 1):
        instr = (item.get("instruction", "") or "").strip()
        out = (item.get("output", "") or "").strip()
        print("-"*60)
        print(f"[#{idx}] instruction:\n{instr[:400]}{'...' if len(instr)>400 else ''}")
        print(f"[#{idx}] output[preview]:\n{out.splitlines()[0] if out else ''}")
    return True

# ===================== #
# 高级优化方法
# ===================== #
def run_distillation(epochs=5, batch_size=16):
    """Launch standalone knowledge-distillation training.

    Fix: resolve the script through Config.DISTILL_SCRIPT
    (scripts/distillation_train.py). The old flat-layout path
    BASE_DIR/distillation_train.py predates the directory restructure
    recorded in Config and no longer matches the configured locations.

    Args:
        epochs: Number of distillation epochs.
        batch_size: Per-step batch size.
    """
    print_banner("知识蒸馏 (Knowledge Distillation)", "-")
    
    cmd = [
        sys.executable, Config.DISTILL_SCRIPT,
        "--teacher_model", "Salesforce/codet5-base",
        "--student_model", "Salesforce/codet5-small",
        "--epochs", str(epochs),
        "--batch_size", str(batch_size)
    ]
    
    return run_script_with_args(cmd, "知识蒸馏训练", check_output=False)

def run_multitask(epochs=5, batch_size=16):
    """Launch standalone multi-task training with fixed task weights."""
    print_banner("多任务学习 (Multi-Task Learning)", "-")
    
    # NOTE(review): this legacy helper targets multitask_train_v2.py in the
    # project root and uses different weights than
    # HardwareConfig.MULTITASK_WEIGHTS — confirm which is canonical.
    script = os.path.join(Config.BASE_DIR, "multitask_train_v2.py")
    args = [
        sys.executable,
        script,
        "--epochs", str(epochs),
        "--batch_size", str(batch_size),
        "--task_weights", "0.4,0.3,0.2,0.1",
    ]
    
    return run_script_with_args(args, "多任务学习训练", check_output=False)

def run_contrastive(epochs=3, batch_size=32):
    """Launch standalone contrastive pre-training.

    Fix: resolve the script through Config.CONTRASTIVE_SCRIPT
    (scripts/contrastive_pretrain.py). The old flat-layout path
    BASE_DIR/contrastive_pretrain.py predates the directory restructure
    recorded in Config and no longer matches the configured locations.

    Args:
        epochs: Number of pre-training epochs.
        batch_size: Per-step batch size.
    """
    print_banner("对比学习 (Contrastive Learning)", "-")
    
    cmd = [
        sys.executable, Config.CONTRASTIVE_SCRIPT,
        "--data_path", "data/processed/complex_train_text2code.jsonl",
        "--epochs", str(epochs),
        "--batch_size", str(batch_size)
    ]
    
    return run_script_with_args(cmd, "对比学习预训练", check_output=False)

    

def run_all_advanced_methods():
    """Run every advanced optimization method in sequence.

    Stops at the first failure and returns False; returns True when all
    methods complete.
    """
    print_banner("运行所有高级优化方法", "=")
    
    # Each runner is called with its own default epochs/batch size, so the
    # lambdas the old code wrapped them in were unnecessary.
    methods = [
        ("知识蒸馏", run_distillation),
        ("多任务学习", run_multitask),
        ("对比学习", run_contrastive),
    ]
    
    total = len(methods)
    for i, (name, func) in enumerate(methods, 1):
        # Fix: progress used to be hard-coded as "/4" although only 3
        # methods exist; derive the total from the list instead.
        print(f"\n[{i}/{total}] 开始 {name}...")
        if not func():
            print(f"[ERROR] {name} 失败，终止后续执行")
            return False
        print(f"[SUCCESS] {name} 完成！\n")
    
    return True

def run_evaluation():
    """Run the bundled model-evaluation shell script (Linux only).

    On Windows the driver is unusable (it is a bash script), so the user
    is pointed at a Linux host and True is returned.
    """
    print_banner("集成评估", "-")
    
    if os.name != 'nt':
        script = os.path.join(Config.BASE_DIR, "evaluate_all_models.sh")
        return run_script_with_args(["bash", script], "集成评估", check_output=False)
    
    print("[INFO] Windows 系统，使用 Python 运行评估...")
    # The evaluation driver is bash-only; advise running it on Linux.
    print("[TIP] 建议在 Linux 服务器上运行: bash evaluate_all_models.sh")
    return True


# ===================== #
# 全自动化主函数
# ===================== #
def _print_pipeline_intro():
    """Print the pipeline overview: architecture, techniques, stage list, and hardware."""
    print_banner("🚀 Text2Code 全自动化训练流程", "=")
    print("\n=== Transformer & NLP 技术全流程 ===\n")
    print("📐 Transformer 架构:")
    print("  - CodeT5 (T5-based Encoder-Decoder)")
    print("  - Multi-Head Self-Attention + Cross-Attention")
    print("  - LoRA 参数高效微调\n")

    print("🧠 NLP 优化技术:")
    print("  - 知识蒸馏 (Knowledge Distillation)")
    print("  - 对比学习 (Contrastive Learning)")
    print("  - 多任务学习 (Multi-Task Learning)")
    print("  - Few-Shot Learning (Prompt Engineering)\n")

    print("📋 完整流程 (6个阶段):")
    print("  [1]  数据预处理 (CodeSearchNet + MBPP)")
    print("  [2]  基础 Transformer 训练 (CodeT5 + LoRA)")
    print("  [3]  知识蒸馏 (base→small, 模型压缩)")
    print("  [4]  多任务学习 (4个任务, 泛化增强)")
    print("  [5]  对比学习 (Triplet Loss, 表征优化)")
    print("  [6]  模型评估与可视化")

    # Show the detected hardware and a rough wall-clock estimate.
    if torch and torch.cuda.is_available():
        gpu_name = torch.cuda.get_device_name(0)
        gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
        print(f"\n💻 硬件配置: {gpu_name} ({gpu_memory:.0f}GB)")
        print(f"[FAST] 批次大小: {HardwareConfig.MAX_BATCH_SIZE} (自动优化)")
        print(f"🔧 混合精度: {'BF16' if HardwareConfig.USE_BF16 else 'FP16' if HardwareConfig.USE_FP16 else 'FP32'}")
        estimated_time = "8-12" if gpu_memory >= 16 else "12-18" if gpu_memory >= 12 else "18-24"
        print(f"[TIME]  预计时间: ~{estimated_time}小时")
    else:
        print("\n💻 硬件配置: CPU (训练较慢)")
        print("[TIME]  预计时间: ~48-72小时")

    print("\n" + "="*80)


def _print_pipeline_summary(completed_stages, failed_stage, total_time):
    """Print the final report: outcome, elapsed time, and artifact locations.

    Args:
        completed_stages: Names of stages that finished successfully.
        failed_stage: Name of the stage that failed, or None if all passed.
        total_time: Elapsed wall-clock time in seconds.
    """
    hours = int(total_time // 3600)
    minutes = int((total_time % 3600) // 60)

    print("\n" + "="*80)
    if failed_stage:
        print_banner(f"❌ 流程中断于: {failed_stage}", "=")
        print(f"[SUCCESS] 已完成阶段: {', '.join(completed_stages)}")
    else:
        print_banner("🎉 全流程执行完成!", "=")
        print(f"[SUCCESS] 所有阶段均已完成")

    print(f"\n[TIME]  总耗时: {hours}小时 {minutes}分钟")

    print(f"\n📁 模型保存位置:")
    print(f"  - 基础模型 (Complex): {Config.MODEL_OUTPUT_DIR}_complex")
    print(f"  - 基础模型 (Simple):  {Config.MODEL_OUTPUT_DIR}_simple")
    print(f"  - 蒸馏模型:           {Config.MODEL_DISTILL_DIR}")
    print(f"  - 多任务模型:         {Config.MODEL_MULTITASK_DIR}")
    print(f"  - 对比学习模型:       {Config.MODEL_CONTRASTIVE_DIR}")

    print(f"\n[DATA] 结果文件:")
    print(f"  - 评估结果: {Config.RESULTS_DIR}")
    print(f"  - 训练日志: {Config.LOGS_DIR}")

    print("\n" + "="*80)
    print("[START] 快速使用:")
    print(f"  # 交互式推理测试")
    print(f"  python {Config.INFERENCE_SCRIPT}")
    print(f"\n  # 启动 Web 服务")
    print(f"  python backend/serve_api.py")
    print("="*80 + "\n")


def run_full_pipeline():
    """Run the complete automated training pipeline (6 stages).

    Stages: preprocessing, base LoRA training, knowledge distillation,
    multi-task learning, contrastive learning, evaluation. Stops at the
    first failing stage and prints a summary either way.

    Returns:
        bool: True if all stages completed, False if any stage failed.
    """
    _print_pipeline_intro()

    # Ask for confirmation only when attached to an interactive terminal;
    # in CI / nohup runs, start immediately.
    # (sys is imported at module level — the redundant local import is gone.)
    if sys.stdin.isatty():
        try:
            input("\n按 Enter 开始执行，Ctrl+C 取消...")
        except EOFError:
            # stdin claimed to be a TTY but was closed — proceed anyway.
            # BUG FIX: the bare ``except:`` here used to swallow Ctrl+C even
            # though the prompt promises it cancels; KeyboardInterrupt now
            # propagates to main()'s handler.
            pass
    else:
        print("[INFO] 检测到非交互式环境，自动开始执行...")

    start_time = time.time()
    stages = [
        ("数据预处理", stage_1_preprocess_data),
        ("Transformer 基础训练", stage_2_base_training),
        ("知识蒸馏", stage_3_knowledge_distillation),
        ("多任务学习", stage_4_multitask_learning),
        ("对比学习", stage_5_contrastive_pretraining),
        ("模型评估", stage_6_evaluation),
    ]

    completed_stages = []
    failed_stage = None

    for stage_name, stage_func in stages:
        print(f"\n{'='*80}")
        print(f"[RUN]  开始: {stage_name}")
        print(f"{'='*80}")

        if not stage_func():
            print(f"\n[FAIL] {stage_name} 失败，终止流程")
            failed_stage = stage_name
            break

        completed_stages.append(stage_name)
        print(f"\n[SUCCESS] {stage_name} 完成\n")

    _print_pipeline_summary(completed_stages, failed_stage, time.time() - start_time)
    return failed_stage is None

def main():
    """Entry point: parse CLI arguments and dispatch to the requested mode.

    Modes: ``--full-auto`` runs all 6 stages, ``--stage NAME`` runs one
    stage, and no arguments prints the help text.

    Returns:
        bool: True on success (or when only help was shown), False on
        failure, environment-check failure, or user interrupt.
    """
    parser = argparse.ArgumentParser(
        description="Text2Code 全自动化训练系统 - Transformer & NLP 优化技术",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
=== Transformer & NLP 技术栈 ===
📐 Transformer 架构:
  - CodeT5 (T5-based Encoder-Decoder)
  - Multi-Head Self-Attention + Cross-Attention
  - LoRA 参数高效微调 (0.5%% 参数)

🧠 NLP 优化技术:
  - 知识蒸馏 (Teacher-Student + KL散度)
  - 对比学习 (Triplet Loss)
  - 多任务学习 (4个NLP任务)
  - Few-Shot Learning (Prompt Engineering)

=== 使用示例 ===
# 完整自动化流程（推荐）
python main.py --full-auto

# 单独执行某个阶段
python main.py --stage preprocess  # 数据预处理
python main.py --stage train       # Transformer基础训练
python main.py --stage distill     # 知识蒸馏
python main.py --stage multitask   # 多任务学习
python main.py --stage contrastive # 对比学习
python main.py --stage evaluate    # 模型评估

# 自定义训练参数
python main.py --full-auto --batch-size 16 --epochs 5

# 跳过已完成的阶段
python main.py --full-auto --resume
        """
    )

    parser.add_argument("--full-auto", action="store_true",
                       help="执行完整的6阶段自动化流程（推荐）")
    parser.add_argument("--stage",
               choices=["preprocess", "train", "distill", "multitask",
                   "contrastive", "evaluate"],
                       help="单独执行某个阶段")
    parser.add_argument("--resume", action="store_true",
                       help="跳过已完成的阶段，继续执行")
    parser.add_argument("--batch-size", type=int,
                       default=None,
                       help=f"训练批次大小 (默认: 自动检测GPU)")
    parser.add_argument("--epochs", type=int,
                       default=Config.DEFAULT_EPOCHS,
                       help=f"训练轮数 (默认: {Config.DEFAULT_EPOCHS})")
    parser.add_argument("--force", action="store_true",
                       help="强制重新训练，忽略已有模型")
    parser.add_argument("--report-to",
                       choices=["none", "tensorboard", "wandb"],
                       default=os.getenv("T2C_REPORT_TO", "none"),
                       help="训练日志上报渠道 (默认: none)")

    args = parser.parse_args()

    # Welcome banner with basic environment info.
    print_banner("Text2Code - Transformer & NLP 全自动化训练系统", "=")
    print(f"⏰ 启动时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"📁 工作目录: {Config.BASE_DIR}")
    print(f"🔧 Python: {sys.version.split()[0]}")

    # Abort early if dependencies / hardware are not usable.
    if not check_environment():
        print("\n[FAIL] 环境检查失败，请检查依赖和配置")
        return False

    # Apply a user-supplied batch size to every training phase.
    # BUG FIX: compare against None explicitly so an explicit value is
    # never silently ignored by truthiness.
    if args.batch_size is not None:
        HardwareConfig.MAX_BATCH_SIZE = args.batch_size
        HardwareConfig.MAX_BATCH_SIZE_DISTILL = max(4, args.batch_size // 2)
        HardwareConfig.MAX_BATCH_SIZE_MULTITASK = args.batch_size
        HardwareConfig.MAX_BATCH_SIZE_CONTRASTIVE = args.batch_size * 2
        print(f"\n[CONFIG] 使用自定义批次大小: {args.batch_size}")

    success = False

    try:
        # Propagate the chosen logging backend to child training scripts.
        if args.report_to:
            os.environ["T2C_REPORT_TO"] = args.report_to
            print(f"[LOGGING] 训练日志上报: {args.report_to}")

        if args.full_auto:
            # Full 6-stage automated pipeline.
            success = run_full_pipeline()

        elif args.stage:
            # Run a single named stage.
            stage_map = {
                "preprocess": ("数据预处理", stage_1_preprocess_data),
                "train": ("Transformer 基础训练", stage_2_base_training),
                "distill": ("知识蒸馏", stage_3_knowledge_distillation),
                "multitask": ("多任务学习", stage_4_multitask_learning),
                "contrastive": ("对比学习", stage_5_contrastive_pretraining),
                "evaluate": ("模型评估", stage_6_evaluation),
            }

            stage_name, stage_func = stage_map[args.stage]
            print(f"\n[RUN]  执行单独阶段: {stage_name}\n")
            success = stage_func()

        else:
            # No mode selected: show help and exit successfully.
            parser.print_help()
            print("\n[TIP] 推荐使用: python main.py --full-auto")
            return True

    except KeyboardInterrupt:
        print("\n\n[WARNING]  用户中断执行 (Ctrl+C)")
        success = False
    except Exception as e:
        print(f"\n[FAIL] 执行过程中发生错误: {str(e)}")
        import traceback
        traceback.print_exc()
        success = False

    # Final summary with next steps / troubleshooting hints.
    print("\n" + "="*80)
    if success:
        print("[SUCCESS] 执行成功")
        print("\n下一步:")
        print("  1. 查看训练日志: ls logs/")
        print("  2. 测试模型推理: python utils/inference.py")
        print("  3. 启动 Web 服务: python backend/serve_api.py")
        print("  4. 查看技术文档: cat TRANSFORMER_NLP_ANALYSIS.md")
    else:
        print("[FAIL] 执行失败")
        print("\n故障排查:")
        print("  1. 检查日志文件: ls logs/")
        print("  2. 检查环境依赖: pip list | grep -E 'torch|transformers|peft'")
        print("  3. 查看技术文档: cat TRANSFORMER_NLP_ANALYSIS.md")
    print("="*80 + "\n")

    # BUG FIX: removed the duplicated, unreachable second ``return success``.
    return success

if __name__ == "__main__":
    # Exit code 0 on success, 1 on failure — suitable for shell pipelines.
    sys.exit(0 if main() else 1)