#!/usr/bin/env python
# encoding: utf-8
"""
@license: (C) Copyright 2025, Jason.
@author: Jason
@email: 809341512@qq.com
@tel: 15383646168
@datetime: 2025/01/XX
@project: LucaOneTasks
@file: lucaone_lora_adapter.py
@desc: LoRA adapter for LucaOne pre-trained model (Q/K/V+Out+FFN strategy)
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, List, Dict, Any
import math
import re


class LoRALinear(nn.Module):
    """
    LoRA wrapper around a frozen ``nn.Linear`` layer.

    Implements the low-rank update  W' = W + (B @ A) * (alpha / rank),
    where A is [rank, in_features] and B is [out_features, rank].
    The base layer's parameters are frozen; only A and B are trainable.
    Because B is zero-initialized, the adapter starts as an exact no-op.
    """

    def __init__(
        self,
        base_layer: nn.Linear,
        rank: int = 8,
        alpha: float = 16.0,
        dropout: float = 0.1,
        merge_weights: bool = False
    ):
        """
        Args:
            base_layer: pre-trained Linear layer to adapt (frozen in place).
            rank: LoRA rank; must be positive.
            alpha: LoRA scaling numerator; effective scale is alpha / rank.
            dropout: dropout applied to the input of the LoRA path only.
            merge_weights: when True the layer is treated as already merged
                (the LoRA path is skipped in ``forward``).

        Raises:
            ValueError: if ``rank`` is not positive.
        """
        super().__init__()
        if rank <= 0:
            raise ValueError(f"LoRA rank must be positive, got {rank}")
        self.base_layer = base_layer
        self.rank = rank
        self.alpha = alpha
        self.scaling = alpha / rank
        # Doubles as the "currently merged" flag used by merge/unmerge below.
        self.merge_weights = merge_weights

        # Freeze the pre-trained layer; only the LoRA factors are trained.
        for param in self.base_layer.parameters():
            param.requires_grad = False

        in_features = base_layer.in_features
        out_features = base_layer.out_features

        # A: kaiming-initialized; B: zeros -> initial delta (B @ A) is zero.
        # (torch.empty instead of torch.randn: the kaiming init below
        # overwrites the values anyway, so a random fill would be wasted.)
        self.lora_A = nn.Parameter(torch.empty(rank, in_features))
        self.lora_B = nn.Parameter(torch.zeros(out_features, rank))
        self.lora_dropout = nn.Dropout(dropout)

        nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass: base output plus scaled low-rank correction.

        Args:
            x: [batch, seq_len, in_features] or [..., in_features]

        Returns:
            [batch, seq_len, out_features] or [..., out_features]
        """
        base_output = self.base_layer(x)

        # If the delta has already been merged into base_layer.weight,
        # adding the LoRA path again would double-count it.
        if self.merge_weights:
            return base_output

        # F.linear(x, W) computes x @ W^T with W shaped [out, in], so
        # lora_A ([rank, in_features]) and lora_B ([out_features, rank])
        # are passed as-is -- transposing them would break the matmul
        # whenever rank != in_features.
        lora_intermediate = F.linear(self.lora_dropout(x), self.lora_A)  # [..., rank]
        lora_output = F.linear(lora_intermediate, self.lora_B)           # [..., out_features]

        return base_output + lora_output * self.scaling

    def merge_weights_(self):
        """Fold the LoRA delta into the base weight (for inference)."""
        if not self.merge_weights:
            delta_w = (self.lora_B @ self.lora_A) * self.scaling
            self.base_layer.weight.data += delta_w
            self.merge_weights = True

    def unmerge_weights_(self):
        """Undo a previous merge (for resuming training)."""
        if self.merge_weights:
            delta_w = (self.lora_B @ self.lora_A) * self.scaling
            self.base_layer.weight.data -= delta_w
            self.merge_weights = False


def _detect_model_type(model: nn.Module) -> str:
    """
    自动检测模型类型: "luca_transformer" 或 "bert_model"
    """
    # 检查是否是 BertModel
    try:
        from src.common.modeling_bert import BertModel
        if isinstance(model, BertModel):
            return "bert_model"
    except ImportError:
        pass
    
    # 检查是否是 LucaTransformer
    try:
        from src.common.cross_transformer import LucaTransformer
        if isinstance(model, LucaTransformer):
            return "luca_transformer"
    except ImportError:
        pass
    
    # 通过结构特征判断
    if hasattr(model, 'encoder'):
        encoder = model.encoder
        if isinstance(encoder, nn.ModuleList) and len(encoder) > 0:
            first_layer = encoder[0]
            # BertModel: encoder.layer[0].attention.self.query
            if hasattr(first_layer, 'attention'):
                return "bert_model"
            # LucaTransformer: encoder[0].self_attn.q_proj
            elif hasattr(first_layer, 'self_attn'):
                return "luca_transformer"
    
    # 默认尝试 LucaTransformer 的命名规则
    return "luca_transformer"


def _match_module_name(name: str, pattern: str) -> bool:
    """
    精确匹配模块名称
    
    Args:
        name: 完整模块名称，如 "encoder.0.self_attn.q_proj"
        pattern: 匹配模式，如 "self_attn.q_proj" 或 "attention.self.query"
    
    Returns:
        bool: 是否匹配
    """
    # 移除权重后缀 (.weight, .bias)
    name_base = name.split('.weight')[0].split('.bias')[0]
    
    # 精确匹配：name 以 pattern 结尾
    # 例如: "encoder.0.self_attn.q_proj" 匹配 "self_attn.q_proj"
    return name_base.endswith('.' + pattern) or name_base == pattern


def apply_lora_to_lucaone_model(
    model: nn.Module,
    target_modules: List[str] = None,
    rank: int = 8,
    alpha: float = 16.0,
    dropout: float = 0.1,
    model_type: str = "auto"
) -> Dict[str, Any]:
    """
    Apply LoRA to a LucaOne model (supports LucaTransformer and BertModel).

    Every ``nn.Linear`` whose qualified name matches one of the
    ``target_modules`` patterns is replaced in place by a ``LoRALinear``
    that wraps (and freezes) the original layer.

    Args:
        model: LucaBase/LucaTransformer or LucaPPI/BertModel instance.
        target_modules: module-name patterns to adapt.
            LucaTransformer: ["self_attn.q_proj", "self_attn.k_proj", ...]
            BertModel: ["attention.self.query", "attention.self.key", ...]
            When None, defaults are chosen from the detected model type.
            LucaTransformer-style patterns are translated automatically
            when the model is a BertModel.
        rank: LoRA rank.
        alpha: LoRA alpha (scaling numerator).
        dropout: LoRA dropout.
        model_type: "auto", "luca_transformer" or "bert_model".

    Returns:
        Dict: {
            "replaced_modules": List[str],  # names of replaced modules
            "total_lora_params": int,       # total LoRA parameter count
            "original_params": int,         # original model parameter count
            "model_type": str               # detected/used model type
        }
    """
    # Resolve the model type if the caller asked for auto-detection.
    if model_type == "auto":
        model_type = _detect_model_type(model)

    if target_modules is None:
        # Default Q/K/V + attention-output + FFN strategy per model type.
        if model_type == "bert_model":
            target_modules = [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "intermediate.dense",
                "output.dense"
            ]
        else:
            target_modules = [
                "self_attn.q_proj",
                "self_attn.k_proj",
                "self_attn.v_proj",
                "self_attn.out_proj",
                "fc1",
                "fc2"
            ]
    elif model_type == "bert_model":
        # Translate LucaTransformer-style patterns to BertModel naming;
        # unknown patterns pass through unchanged (the user may already
        # have supplied BertModel-style names).
        name_mapping = {
            "self_attn.q_proj": "attention.self.query",
            "self_attn.k_proj": "attention.self.key",
            "self_attn.v_proj": "attention.self.value",
            "self_attn.out_proj": "attention.output.dense",
            "fc1": "intermediate.dense",
            "fc2": "output.dense"
        }
        target_modules = [name_mapping.get(pattern, pattern) for pattern in target_modules]

    replaced_modules = []
    total_lora_params = 0
    original_params = sum(p.numel() for p in model.parameters())

    # Snapshot of all modules, used for parent lookup during replacement.
    all_modules = dict(model.named_modules())

    # Collect the matching Linear layers BEFORE mutating the model:
    # calling setattr() while the lazy named_modules() generator is still
    # walking the tree mutates the module hierarchy mid-iteration.
    matched_linears = [
        (name, module)
        for name, module in model.named_modules()
        if isinstance(module, nn.Linear)
        and any(_match_module_name(name, pattern) for pattern in target_modules)
    ]

    for name, module in matched_linears:
        # Split off the parent path and the attribute name to replace.
        name_parts = name.split('.')
        if len(name_parts) < 2:
            # Top-level Linear: no parent attribute path to re-attach to.
            continue

        parent_name = '.'.join(name_parts[:-1])
        child_name = name_parts[-1]
        parent_module = all_modules.get(parent_name, model)

        # Wrap the original layer; LoRALinear freezes it internally.
        lora_layer = LoRALinear(
            base_layer=module,
            rank=rank,
            alpha=alpha,
            dropout=dropout
        )
        setattr(parent_module, child_name, lora_layer)
        replaced_modules.append(name)

        # A is [rank, in_features] and B is [out_features, rank].
        lora_params = (
            module.in_features * rank +  # A matrix
            rank * module.out_features    # B matrix
        )
        total_lora_params += lora_params

        print(f"✓ Replaced {name}: {module.in_features}×{module.out_features} "
              f"→ LoRA {lora_params:,} params (rank={rank})")

    if len(replaced_modules) == 0:
        print(f"\n⚠️  警告: 没有找到任何匹配的模块!")
        print(f"  模型类型: {model_type}")
        print(f"  目标模块模式: {target_modules}")
        print(f"  提示: 请检查模型类型和目标模块名称是否匹配")

    print(f"\n总统计:")
    print(f"  模型类型: {model_type}")
    print(f"  替换模块数: {len(replaced_modules)}")
    print(f"  LoRA 参数总量: {total_lora_params:,}")
    print(f"  原始模型参数: {original_params:,}")
    if original_params > 0:
        print(f"  LoRA 占比: {100 * total_lora_params / original_params:.2f}%")

    return {
        "replaced_modules": replaced_modules,
        "total_lora_params": total_lora_params,
        "original_params": original_params,
        "model_type": model_type
    }


def freeze_base_model(model: nn.Module, unfreeze_lora: bool = True):
    """
    Freeze all base-model parameters, keeping only LoRA factors trainable.

    Args:
        model: a model that has (possibly) been wrapped with LoRALinear layers.
        unfreeze_lora: when True, lora_A / lora_B are made trainable again.
    """
    # Freeze everything first, the LoRA factors included.
    model.requires_grad_(False)

    if not unfreeze_lora:
        return

    # Re-enable gradients only on the low-rank factors.
    for submodule in model.modules():
        if isinstance(submodule, LoRALinear):
            submodule.lora_A.requires_grad_(True)
            submodule.lora_B.requires_grad_(True)


def get_lora_parameters(model: nn.Module) -> List[torch.nn.Parameter]:
    """
    Collect every LoRA factor in the model.

    Args:
        model: a model wrapped with LoRALinear layers.

    Returns:
        List[Parameter]: lora_A followed by lora_B for each LoRALinear,
        in module-traversal order.
    """
    return [
        factor
        for submodule in model.modules()
        if isinstance(submodule, LoRALinear)
        for factor in (submodule.lora_A, submodule.lora_B)
    ]


def print_trainable_parameters(model: nn.Module):
    """
    Print per-parameter and aggregate trainable-parameter statistics.

    Args:
        model: any nn.Module; models without parameters are handled
            gracefully (previously this raised ZeroDivisionError).
    """
    trainable_params = 0
    all_params = 0

    for name, param in model.named_parameters():
        all_params += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
            print(f"  [TRAINABLE] {name}: {param.shape} ({param.numel():,} params)")

    # Guard against division by zero for parameter-less models.
    ratio = 100 * trainable_params / all_params if all_params else 0.0
    print(f"\n可训练参数: {trainable_params:,} / {all_params:,} ({ratio:.2f}%)")

# 使用示例和测试
if __name__ == "__main__":
    # 示例: 如何应用到 LucaBase 模型
    print("LoRA 适配器示例")
    print("=" * 60)
    
    # 这里需要实际的模型配置
    # from src.common.luca_base import LucaBase
    # from src.common.model_config import LucaConfig
    # 
    # config = LucaConfig(...)
    # model = LucaBase(config, args)
    # 
    # # 应用 LoRA
    # apply_lora_to_lucaone_model(
    #     model=model.seq_encoder,  # 或 model.matrix_encoder
    #     target_modules=["self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj", 
    #                    "self_attn.out_proj", "fc1", "fc2"],
    #     rank=8,
    #     alpha=16.0
    # )
    # 
    # # 冻结基础模型
    # freeze_base_model(model)
    # 
    # # 打印统计
    # print_trainable_parameters(model)
    
    print("\n✓ LoRA 适配器模块加载成功")

