#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
对比学习预训练 - 代码语义表示学习

核心思想:
1. 正样本拉近: 相似功能的代码在嵌入空间距离更近
2. 负样本推远: 不同功能的代码在嵌入空间距离更远
3. Triplet Loss: (anchor, positive, negative) 三元组损失

技术细节:
- SimCSE 风格对比学习
- 在生成任务前预训练 Encoder
- 学习更好的代码语义表示

使用方法:
    python contrastive_pretrain.py --epochs 3
"""

import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel
from torch.utils.data import Dataset, DataLoader
import random
import argparse
from pathlib import Path

# Use the HF mirror endpoint by default (overridable via the environment).
os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")

# ===================== #
# Local model configuration
# ===================== #
# Project root: one directory above the folder containing this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Expected location of a local CodeT5-base checkout (preferred over the Hub).
LOCAL_BASE_MODEL = os.path.join(BASE_DIR, "models", "Salesforce_codet5-base")

def get_model_path():
    """Resolve the base model path, preferring a local checkout.

    Returns the local model directory when it exists and contains a
    config.json; otherwise falls back to the Hugging Face model id.
    """
    local_config = os.path.join(LOCAL_BASE_MODEL, "config.json")
    if os.path.exists(LOCAL_BASE_MODEL) and os.path.exists(local_config):
        print(f"[INFO] Using local model: {LOCAL_BASE_MODEL}")
        return LOCAL_BASE_MODEL
    print(f"[INFO] Local model not found, will use: Salesforce/codet5-base")
    return "Salesforce/codet5-base"

# Checkpoint-resume policy, read from the T2C_RESUME environment variable.
RESUME_POLICY = os.getenv("T2C_RESUME", "auto").lower()  # auto|always|never

# Throughput tuning (sized for an RTX 5090 class GPU).
BATCH_SIZE = 64          # large batches help contrastive learning
NUM_WORKERS = 8          # parallel data-loading workers
PREFETCH_FACTOR = 4      # batches prefetched per worker
PERSISTENT_WORKERS = True # keep workers alive to avoid restart overhead


# ===================== #
# 对比学习模型
# ===================== #
class ContrastiveCodeEncoder(nn.Module):
    """Contrastive-learning code encoder.

    Wraps the encoder half of a seq2seq backbone (CodeT5-style) and adds a
    linear projection head whose output is L2-normalized, making it suitable
    for distance-based losses such as the triplet loss below.

    Features:
    - Adapts to the backbone's hidden size dynamically
      (codet5-base: 768, codet5-small: 512).
    - L2-normalized projection layer.
    """
    
    def __init__(self, base_model_name=None):
        """
        Args:
            base_model_name: local path or HF model id; when None, it is
                resolved via get_model_path() (local checkout preferred).
        """
        super().__init__()
        # Prefer the local model when available.
        if base_model_name is None:
            base_model_name = get_model_path()
        
        backbone = AutoModel.from_pretrained(base_model_name)
        # Keep only the encoder stack; the decoder is unused for
        # representation learning.
        self.encoder = backbone.encoder
        # T5-style configs expose `d_model`; BERT-style configs expose
        # `hidden_size`; fall back to 512 if neither is present.
        hidden_size = getattr(backbone.config, "d_model", None) or getattr(backbone.config, "hidden_size", 512)
        self.projection = nn.Linear(hidden_size, 256)  # project into the contrastive space
        
    def forward(self, input_ids, attention_mask):
        # encode -> pool -> project -> L2-normalize
        outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        # NOTE(review): T5-style encoders have no [CLS] token — this pools by
        # taking the hidden state of the *first input token*. Masked mean
        # pooling may represent the sequence better; confirm before changing.
        pooled = outputs.last_hidden_state[:, 0, :]  # [batch, hidden]
        projected = self.projection(pooled)  # [batch, 256]
        return F.normalize(projected, p=2, dim=1)  # unit-length embeddings


# ===================== #
# Triplet Loss 三元组损失
# ===================== #
def triplet_loss(anchor, positive, negative, margin=0.5):
    """Mean triplet margin loss over a batch of embedding triplets.

    L = mean(max(0, d(a, p) - d(a, n) + margin))

    where d is the (non-squared) Euclidean distance. The loss pulls
    anchor-positive pairs together and pushes anchor-negative pairs apart
    until the two distances differ by at least `margin`.

    Args:
        anchor / positive / negative: [batch, dim] embedding tensors.
        margin: separation margin between positive and negative distances.

    Returns:
        Scalar tensor: the batch-mean hinge loss.
    """
    dist_to_positive = F.pairwise_distance(anchor, positive, p=2)
    dist_to_negative = F.pairwise_distance(anchor, negative, p=2)
    per_sample = torch.clamp(dist_to_positive - dist_to_negative + margin, min=0.0)
    return per_sample.mean()


# ===================== #
# 三元组数据集
# ===================== #
class TripletCodeDataset(Dataset):
    """Triplet dataset: (anchor, positive, negative).

    Sampling strategy per item:
    - positive: another sample with the same ``task_type`` as the anchor
      (falls back to the anchor itself when its group has one member);
    - negative: a sample from a different ``task_type``, or any *other*
      sample when only one task type exists.

    Records are read from a JSONL file; malformed lines are skipped.
    Each record must contain an ``"output"`` field (the code text) and may
    contain a ``"task_type"`` field (defaults to "unknown").
    """
    
    def __init__(self, data_path, tokenizer, max_length=128):
        """
        Args:
            data_path: path to the JSONL training file.
            tokenizer: HF-style callable returning a dict with
                ``input_ids`` and ``attention_mask`` tensors.
            max_length: tokenizer truncation/padding length.

        Raises:
            FileNotFoundError: when ``data_path`` does not exist.
            ValueError: when no valid JSON line is found.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        
        # Validate the data path before attempting to read it.
        data_path = Path(data_path)
        if not data_path.exists():
            raise FileNotFoundError(f"数据文件不存在: {data_path}")
        
        # Load records, tolerating (and skipping) malformed lines.
        print(f"加载数据: {data_path}...")
        self.data = []
        with open(data_path, 'r', encoding='utf-8') as f:
            for line in f:
                try:
                    self.data.append(json.loads(line))
                except json.JSONDecodeError:
                    continue
        
        if len(self.data) == 0:
            raise ValueError(f"未找到有效数据: {data_path}")
        
        print(f"已加载 {len(self.data)} 个样本")
        
        # Group sample indices by task type for efficient pos/neg sampling.
        self.task_groups = {}
        for idx, sample in enumerate(self.data):
            task_type = sample.get("task_type", "unknown")
            self.task_groups.setdefault(task_type, []).append(idx)
        
        print(f"任务分组: {list(self.task_groups.keys())}")
    
    def __len__(self):
        return len(self.data)
    
    def _encode(self, text):
        """Tokenize one code string into fixed-length 1-D tensors."""
        enc = self.tokenizer(
            text, max_length=self.max_length,
            truncation=True, padding="max_length", return_tensors="pt"
        )
        return enc["input_ids"].squeeze(0), enc["attention_mask"].squeeze(0)
    
    def __getitem__(self, idx):
        # Anchor
        anchor = self.data[idx]
        anchor_task = anchor.get("task_type", "unknown")
        
        # Positive: another sample of the same task type; a single-member
        # group degrades to using the anchor itself.
        same_group = self.task_groups[anchor_task]
        if len(same_group) > 1:
            pos_idx = random.choice([i for i in same_group if i != idx])
        else:
            pos_idx = idx
        positive = self.data[pos_idx]
        
        # Negative: prefer a sample from a different task type.
        other_tasks = [t for t in self.task_groups.keys() if t != anchor_task]
        if other_tasks:
            neg_task = random.choice(other_tasks)
            neg_idx = random.choice(self.task_groups[neg_task])
        elif len(self.data) > 1:
            # Only one task type: pick any different sample as the negative.
            neg_idx = random.randint(0, len(self.data) - 1)
            while neg_idx == idx:
                neg_idx = random.randint(0, len(self.data) - 1)
        else:
            # Single-sample dataset: the unguarded resampling loop would
            # spin forever here; degrade gracefully to a degenerate triplet.
            neg_idx = idx
        negative = self.data[neg_idx]
        
        # Tokenize all three legs of the triplet.
        anchor_ids, anchor_mask = self._encode(anchor["output"])
        pos_ids, pos_mask = self._encode(positive["output"])
        neg_ids, neg_mask = self._encode(negative["output"])
        
        return {
            "anchor_input_ids": anchor_ids,
            "anchor_attention_mask": anchor_mask,
            "positive_input_ids": pos_ids,
            "positive_attention_mask": pos_mask,
            "negative_input_ids": neg_ids,
            "negative_attention_mask": neg_mask,
        }


# ===================== #
# 训练循环
# ===================== #
def train_contrastive(model, dataloader, optimizer, device, epochs=3, output_dir=None, resume_epoch=0):
    """Main contrastive-learning training loop.

    Runs epochs ``resume_epoch .. epochs-1``: for every batch, encodes the
    triplet, applies the triplet loss, clips gradients, and steps the
    optimizer. When ``output_dir`` is set, a resumable checkpoint is saved
    after every epoch except the last (the caller persists the final model).

    Returns:
        list of {"epoch", "batch", "loss"} records, one per batch.
    """
    model.train()

    # torch.compile intentionally disabled for stability (graph-compilation
    # issues); the fused optimizer already keeps updates fast.

    # Per-batch loss records, consumed later by the plotting helper.
    train_history = []

    for epoch in range(resume_epoch, epochs):
        epoch_loss_sum = 0.0
        seen_batches = 0

        for batch in dataloader:
            # Move every triplet tensor to the target device.
            tensors = {key: value.to(device, non_blocking=True)
                       for key, value in batch.items()}

            # Encode the three legs of the triplet.
            anchor_emb = model(tensors["anchor_input_ids"], tensors["anchor_attention_mask"])
            pos_emb = model(tensors["positive_input_ids"], tensors["positive_attention_mask"])
            neg_emb = model(tensors["negative_input_ids"], tensors["negative_attention_mask"])

            # Loss + backward pass.
            loss = triplet_loss(anchor_emb, pos_emb, neg_emb, margin=0.5)
            optimizer.zero_grad()
            loss.backward()

            # Clip gradients to guard against explosions.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            batch_loss = loss.item()
            epoch_loss_sum += batch_loss
            seen_batches += 1

            # Record the loss for convergence plotting.
            train_history.append({
                "epoch": epoch + 1,
                "batch": seen_batches,
                "loss": batch_loss,
            })

            # Progress log every 50 batches.
            if seen_batches % 50 == 0:
                print(f"Epoch {epoch+1}/{epochs}, Batch {seen_batches}, Loss: {batch_loss:.4f}")

        avg_loss = epoch_loss_sum / seen_batches
        print(f"Epoch {epoch+1}/{epochs} 完成, 平均损失: {avg_loss:.4f}")

        # Save a resumable checkpoint after every epoch but the last.
        if output_dir and epoch < epochs - 1:
            checkpoint_dir = Path(output_dir) / f"checkpoint-epoch-{epoch+1}"
            checkpoint_dir.mkdir(parents=True, exist_ok=True)

            torch.save({
                'epoch': epoch + 1,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_history': train_history,
            }, checkpoint_dir / "checkpoint.pt")
            print(f"[SAVED] Epoch {epoch+1} 检查点: {checkpoint_dir}")

    return train_history


# ===================== #
# 训练过程可视化
# ===================== #
def _save_training_convergence(train_history, out_dir):
    """保存对比学习训练日志和收敛曲线"""
    os.makedirs(out_dir, exist_ok=True)
    
    if not train_history:
        print("[WARN] 未获取到训练日志历史，跳过收敛曲线绘制")
        return

    # 保存 CSV
    csv_path = os.path.join(out_dir, "training_log.csv")
    try:
        import csv
        with open(csv_path, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(["epoch", "batch", "loss"])
            for rec in train_history:
                writer.writerow([rec["epoch"], rec["batch"], rec["loss"]])
        print(f"[SAVED] 训练日志CSV: {csv_path}")
    except Exception as e:
        print(f"[WARN] 保存训练日志CSV失败: {e}")

    # 绘制并保存收敛曲线
    try:
        import matplotlib.pyplot as plt
        
        # 提取数据
        batches = list(range(1, len(train_history) + 1))
        losses = [rec["loss"] for rec in train_history]
        
        # 计算每个 epoch 的平均 loss
        epochs_data = {}
        for rec in train_history:
            epoch = rec["epoch"]
            if epoch not in epochs_data:
                epochs_data[epoch] = []
            epochs_data[epoch].append(rec["loss"])
        
        epoch_avg_losses = {ep: sum(losses_list) / len(losses_list) 
                           for ep, losses_list in epochs_data.items()}
        
        # 绘制详细的 batch-level loss
        plt.figure(figsize=(10, 6), dpi=120)
        plt.subplot(1, 2, 1)
        plt.plot(batches, losses, color="#1f77b4", linewidth=0.8, alpha=0.7)
        plt.xlabel("batch")
        plt.ylabel("triplet loss")
        plt.title("Contrastive Learning - Batch Loss")
        plt.grid(True, alpha=0.3)
        
        # 绘制 epoch-level 平均 loss
        plt.subplot(1, 2, 2)
        epochs = sorted(epoch_avg_losses.keys())
        avg_losses = [epoch_avg_losses[ep] for ep in epochs]
        plt.plot(epochs, avg_losses, marker='o', color="#ff7f0e", linewidth=2)
        plt.xlabel("epoch")
        plt.ylabel("average triplet loss")
        plt.title("Contrastive Learning - Epoch Average Loss")
        plt.grid(True, alpha=0.3)
        
        png_path = os.path.join(out_dir, "training_convergence.png")
        plt.tight_layout()
        plt.savefig(png_path)
        plt.close()
        print(f"[SAVED] 收敛曲线图: {png_path}")
    except ImportError:
        print("[WARN] 未安装 matplotlib，已跳过曲线绘制。可执行 'pip install matplotlib' 后重训或根据 CSV 自行绘制。")
    except Exception as e:
        print(f"[WARN] 绘制收敛曲线失败: {e}")


# ===================== #
# 主函数
# ===================== #
def main(args):
    """Entry point: build the model, data, and optimizer, run contrastive
    pretraining (resuming from the latest checkpoint when allowed), and
    persist the encoder weights plus the run configuration.

    Args:
        args: parsed CLI namespace with data_path, output_dir, batch_size,
            epochs, lr, and max_length.
    """
    print("=" * 50)
    print("   对比学习预训练 (优化版)")
    print("=" * 50)
    print(f"批次大小: {args.batch_size}")
    print(f"学习率: {args.lr}")
    print(f"训练轮数: {args.epochs}")
    print(f"工作进程数: {NUM_WORKERS}")
    print("")
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"设备: {device}")
    
    # Load tokenizer and model (local checkout preferred).
    print("\n加载分词器和模型...")
    model_path = get_model_path()
    
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = ContrastiveCodeEncoder(model_path).to(device)
    
    # Parameter counts (informational only).
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"总参数: {total_params:,} ({total_params/1e6:.1f}M)")
    print(f"可训练参数: {trainable_params:,} ({trainable_params/1e6:.1f}M)")
    
    # Build the triplet dataset.
    print("\n准备数据集...")
    dataset = TripletCodeDataset(
        args.data_path,
        tokenizer,
        max_length=args.max_length
    )
    
    # DataLoader tuned for throughput (parallel workers + prefetch).
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=NUM_WORKERS,
        prefetch_factor=PREFETCH_FACTOR if NUM_WORKERS > 0 else None,
        persistent_workers=PERSISTENT_WORKERS if NUM_WORKERS > 0 else False,
        pin_memory=True if torch.cuda.is_available() else False
    )
    
    print(f"每个 epoch 的总批次数: {len(dataloader)}")
    
    # Optimizer: fused AdamW on CUDA for faster parameter updates.
    if torch.cuda.is_available():
        optimizer = torch.optim.AdamW(
            model.parameters(),
            lr=args.lr,
            fused=True  # fused kernel speeds up the update step
        )
        print("使用融合 AdamW 优化器")
    else:
        optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
        print("使用标准 AdamW 优化器")
    
    # Look for an existing checkpoint to resume from.
    output_dir = Path(args.output_dir)
    resume_epoch = 0
    resume_checkpoint = None
    
    if output_dir.exists():
        def _epoch_num(path):
            """Numeric epoch suffix of 'checkpoint-epoch-N' (-1 if malformed)."""
            try:
                return int(path.name.rsplit("-", 1)[-1])
            except ValueError:
                return -1
        
        # Sort by numeric suffix: a plain lexicographic sort would rank
        # "checkpoint-epoch-10" before "checkpoint-epoch-2" and resume from
        # the wrong (older) checkpoint once training exceeds 9 epochs.
        checkpoints = sorted(output_dir.glob("checkpoint-epoch-*"), key=_epoch_num)
        if checkpoints:
            latest_checkpoint = checkpoints[-1]
            
            if RESUME_POLICY == "never":
                print(f"[INFO] 找到检查点 {latest_checkpoint}, 但 RESUME_POLICY=never, 重新开始训练")
            else:
                # Both "always" and "auto" resume from the latest checkpoint.
                resume_checkpoint = latest_checkpoint
            
            if resume_checkpoint:
                try:
                    checkpoint_path = resume_checkpoint / "checkpoint.pt"
                    checkpoint = torch.load(checkpoint_path, map_location=device)
                    
                    model.load_state_dict(checkpoint['model_state_dict'])
                    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                    resume_epoch = checkpoint['epoch']
                    
                    print(f"[INFO] 从 epoch {resume_epoch} 的检查点恢复")
                    print(f"[INFO] 继续从 epoch {resume_epoch + 1}/{args.epochs} 训练")
                except Exception as e:
                    # A corrupt/partial checkpoint should not block training.
                    print(f"[WARN] 加载检查点失败: {e}, 重新开始训练")
                    resume_epoch = 0
    
    # Train.
    print("\n开始对比学习训练...")
    train_history = train_contrastive(
        model, dataloader, optimizer, device, 
        epochs=args.epochs, 
        output_dir=str(output_dir),
        resume_epoch=resume_epoch
    )
    
    # Persist the training log and convergence plots.
    _save_training_convergence(train_history, output_dir)
    
    # Persist the encoder weights (only the encoder is reused downstream).
    output_dir.mkdir(parents=True, exist_ok=True)
    
    output_path = output_dir / "contrastive_encoder.pt"
    torch.save(model.encoder.state_dict(), output_path)
    print(f"\n[SUCCESS] 对比学习编码器已保存至: {output_path}")
    
    # Persist the run configuration next to the weights.
    config = {
        "model": "Salesforce/codet5-base",
        "batch_size": args.batch_size,
        "epochs": args.epochs,
        "lr": args.lr,
        "max_length": args.max_length,
        "total_samples": len(dataset),
        "total_params": total_params,
    }
    
    config_path = output_dir / "contrastive_config.json"
    with open(config_path, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=2)
    
    print(f"配置已保存至: {config_path}")

if __name__ == "__main__":
    # CLI entry point: parse hyperparameters and launch pretraining.
    parser = argparse.ArgumentParser(description="对比学习代码生成预训练")
    parser.add_argument("--data_path", type=str, default="data/processed/complex_train_text2code.jsonl",
                        help="训练数据路径")
    parser.add_argument("--output_dir", type=str, default="model/contrastive_pretrained",
                        help="模型输出目录")
    parser.add_argument("--batch_size", type=int, default=BATCH_SIZE,
                        help="训练批次大小")
    parser.add_argument("--epochs", type=int, default=3,
                        help="训练轮数")
    parser.add_argument("--lr", type=float, default=1e-4,
                        help="学习率 (为稳定性降低)")
    parser.add_argument("--max_length", type=int, default=128,
                        help="最大序列长度")
    
    args = parser.parse_args()
    main(args)
