#!/usr/bin/env python
# encoding: utf-8
"""
@license: (C) Copyright 2025, Jason.
@author: Jason
@email: 809341512@qq.com
@tel: 15383646168
@datetime: 2025/01/XX
@project: LucaOneTasks
@file: lucaone_lora_adapter.py
@desc: LoRA adapter for LucaOne pre-trained model (Q/K/V+Out+FFN strategy)
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, List, Dict, Any
import math
import re


class LoRALinear(nn.Module):
    """
    LoRA wrapper around an ``nn.Linear`` layer; drop-in replacement for it.

    Implements: W' = W + (B @ A) * (alpha / rank)

    Fully compatible with PyTorch DistributedDataParallel (DDP):
    - LoRA parameters (lora_A, lora_B) are nn.Parameter, so DDP syncs them.
    - forward() is purely functional and mutates no module state.
    - Base-layer parameters are frozen; DDP handles frozen params correctly.
    """

    def __init__(
        self,
        base_layer: nn.Linear,
        rank: int = 8,
        alpha: float = 16.0,
        dropout: float = 0.1,
        merge_weights: bool = False
    ):
        """
        Args:
            base_layer: the nn.Linear to adapt; its parameters are frozen here.
            rank: LoRA rank; must be > 0.
            alpha: LoRA alpha; the effective scale is alpha / rank.
            dropout: dropout applied to the LoRA branch input only.
            merge_weights: initial merged-state flag (True means the LoRA
                delta is already folded into ``base_layer.weight``).

        Raises:
            ValueError: if rank is not positive.
        """
        super().__init__()
        if rank <= 0:
            raise ValueError(f"LoRA rank must be positive, got {rank}")
        self.base_layer = base_layer
        self.rank = rank
        self.alpha = alpha
        self.scaling = alpha / rank
        self.merge_weights = merge_weights

        # Freeze the wrapped layer: only the LoRA matrices receive gradients.
        for param in self.base_layer.parameters():
            param.requires_grad = False

        in_features = base_layer.in_features
        out_features = base_layer.out_features

        # F.linear(input, weight) computes input @ weight^T, so:
        #   lora_A: [rank, in_features]  -> x @ A^T         yields [..., rank]
        #   lora_B: [out_features, rank] -> (x A^T) @ B^T   yields [..., out_features]
        # torch.empty (not randn): the values are overwritten by the init below,
        # so the extra random fill would be wasted work.
        self.lora_A = nn.Parameter(torch.empty(rank, in_features))
        self.lora_B = nn.Parameter(torch.empty(out_features, rank))
        self.lora_dropout = nn.Dropout(dropout)

        # Standard LoRA init: A ~ Kaiming-uniform, B = 0, so the adapter
        # starts as an exact no-op (delta W = B @ A = 0).
        nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        nn.init.zeros_(self.lora_B)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Compute ``base_layer(x) + dropout(x) @ A^T @ B^T * scaling``.

        Args:
            x: [batch, seq_len, in_features] or [..., in_features]

        Returns:
            [batch, seq_len, out_features] or [..., out_features]

        Note: DDP-safe — this method mutates no module state. When the LoRA
        delta has been merged into the base weight, the low-rank branch is
        skipped so the delta is not applied twice.
        """
        base_output = self.base_layer(x)

        # Fix: previously the LoRA branch was always added, double-counting
        # the delta after merge_weights_() had folded it into the base weight.
        if self.merge_weights:
            return base_output

        # Low-rank branch: project down to `rank`, then back up to out_features.
        lora_intermediate = F.linear(self.lora_dropout(x), self.lora_A)  # [..., rank]
        lora_output = F.linear(lora_intermediate, self.lora_B)           # [..., out_features]

        return base_output + lora_output * self.scaling

    def merge_weights_(self):
        """
        Fold the LoRA delta into the base layer weights (for inference).

        Note: do not call during training; in multi-GPU setups call before or
        after DDP wrapping to avoid inconsistent state across ranks.
        """
        if not self.merge_weights:
            with torch.no_grad():
                # delta_w = B @ A: [out, rank] @ [rank, in] = [out, in],
                # matching base_layer.weight's [out_features, in_features].
                delta_w = (self.lora_B @ self.lora_A) * self.scaling
                self.base_layer.weight.data += delta_w
            self.merge_weights = True

    def unmerge_weights_(self):
        """
        Remove a previously merged LoRA delta from the base layer weights.

        Note: do not call during training; in multi-GPU setups call before or
        after DDP wrapping to avoid inconsistent state across ranks.
        """
        if self.merge_weights:
            with torch.no_grad():
                # Same delta as merge_weights_(), subtracted back out.
                delta_w = (self.lora_B @ self.lora_A) * self.scaling
                self.base_layer.weight.data -= delta_w
            self.merge_weights = False


def _detect_model_type(model: nn.Module) -> str:
    """
    自动检测模型类型: "luca_transformer" 或 "bert_model"
    """
    # 检查是否是 BertModel
    try:
        from src.common.modeling_bert import BertModel
        if isinstance(model, BertModel):
            return "bert_model"
    except ImportError:
        pass
    
    # 检查是否是 LucaTransformer
    try:
        from src.common.cross_transformer import LucaTransformer
        if isinstance(model, LucaTransformer):
            return "luca_transformer"
    except ImportError:
        pass
    
    # 通过结构特征判断
    if hasattr(model, 'encoder'):
        encoder = model.encoder
        if isinstance(encoder, nn.ModuleList) and len(encoder) > 0:
            first_layer = encoder[0]
            # BertModel: encoder.layer[0].attention.self.query
            if hasattr(first_layer, 'attention'):
                return "bert_model"
            # LucaTransformer: encoder[0].self_attn.q_proj
            elif hasattr(first_layer, 'self_attn'):
                return "luca_transformer"
    
    # 默认尝试 LucaTransformer 的命名规则
    return "luca_transformer"


def _match_module_name(name: str, pattern: str) -> bool:
    """
    精确匹配模块名称
    
    Args:
        name: 完整模块名称，如 "encoder.0.self_attn.q_proj"
        pattern: 匹配模式，如 "self_attn.q_proj" 或 "attention.self.query"
    
    Returns:
        bool: 是否匹配
    """
    # 移除权重后缀 (.weight, .bias)
    name_base = name.split('.weight')[0].split('.bias')[0]
    
    # 精确匹配：name 以 pattern 结尾
    # 例如: "encoder.0.self_attn.q_proj" 匹配 "self_attn.q_proj"
    return name_base.endswith('.' + pattern) or name_base == pattern


def apply_lora_to_lucaone_model(
    model: nn.Module,
    target_modules: Optional[List[str]] = None,
    rank: int = 8,
    alpha: float = 16.0,
    dropout: float = 0.1,
    model_type: str = "auto"
) -> Dict[str, Any]:
    """
    Apply LoRA to a LucaOne model (supports LucaTransformer and BertModel).

    Every ``nn.Linear`` whose name matches one of the target patterns is
    replaced in-place by a ``LoRALinear`` wrapper.

    Args:
        model: LucaBase/LucaTransformer or LucaPPI/BertModel
        target_modules: list of target module name patterns.
            LucaTransformer: ["self_attn.q_proj", "self_attn.k_proj", ...]
            BertModel: ["attention.self.query", "attention.self.key", ...]
            If None, defaults are chosen based on the model type.
        rank: LoRA rank
        alpha: LoRA alpha (scaling factor)
        dropout: LoRA dropout
        model_type: "auto", "luca_transformer" or "bert_model"

    Returns:
        Dict: {
            "replaced_modules": List[str],  # names of replaced modules
            "total_lora_params": int,       # total LoRA parameter count
            "original_params": int,         # original parameter count
            "model_type": str               # detected model type
        }
    """
    # Auto-detect the model type
    if model_type == "auto":
        model_type = _detect_model_type(model)

    # Pick default target modules per model type, or translate user-supplied
    # LucaTransformer-style patterns into BertModel names.
    if target_modules is None:
        if model_type == "bert_model":
            # Default BertModel targets (Q/K/V + output projection + FFN)
            target_modules = [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "intermediate.dense",
                "output.dense"
            ]
        else:
            # Default LucaTransformer targets (Q/K/V + output projection + FFN)
            target_modules = [
                "self_attn.q_proj",
                "self_attn.k_proj",
                "self_attn.v_proj",
                "self_attn.out_proj",
                "fc1",
                "fc2"
            ]
    elif model_type == "bert_model":
        # Patterns already in BertModel form pass through unchanged.
        name_mapping = {
            "self_attn.q_proj": "attention.self.query",
            "self_attn.k_proj": "attention.self.key",
            "self_attn.v_proj": "attention.self.value",
            "self_attn.out_proj": "attention.output.dense",
            "fc1": "intermediate.dense",
            "fc2": "output.dense"
        }
        target_modules = [name_mapping.get(pattern, pattern) for pattern in target_modules]

    replaced_modules = []
    total_lora_params = 0
    original_params = sum(p.numel() for p in model.parameters())

    # Name -> module map, used to find each match's parent module.
    all_modules = dict(model.named_modules())

    # Snapshot the matches BEFORE replacing anything: mutating the module tree
    # while named_modules() is still lazily iterating it is fragile.
    matches = [
        (name, module)
        for name, module in model.named_modules()
        # name == "" is the root model itself; never wrap it. Single-part
        # names (direct children of the root) ARE valid targets — the old
        # `len(name_parts) < 2: continue` wrongly skipped them.
        if name and isinstance(module, nn.Linear)
        and any(_match_module_name(name, pattern) for pattern in target_modules)
    ]

    for name, module in matches:
        # Locate the parent module so the child can be swapped via setattr.
        name_parts = name.split('.')
        parent_name = '.'.join(name_parts[:-1])
        child_name = name_parts[-1]

        if parent_name and parent_name in all_modules:
            parent_module = all_modules[parent_name]
        else:
            # Direct child of the root model
            parent_module = model

        # Build the LoRA wrapper and swap it in
        lora_layer = LoRALinear(
            base_layer=module,
            rank=rank,
            alpha=alpha,
            dropout=dropout
        )
        setattr(parent_module, child_name, lora_layer)

        replaced_modules.append(name)

        # LoRA adds A: [rank, in_features] and B: [out_features, rank]
        lora_params = (
            module.in_features * rank +
            rank * module.out_features
        )
        total_lora_params += lora_params

        print(f"✓ Replaced {name}: {module.in_features}×{module.out_features} "
              f"→ LoRA {lora_params:,} params (rank={rank})")

    if len(replaced_modules) == 0:
        print(f"\n⚠️  警告: 没有找到任何匹配的模块!")
        print(f"  模型类型: {model_type}")
        print(f"  目标模块模式: {target_modules}")
        print(f"  提示: 请检查模型类型和目标模块名称是否匹配")

    print(f"\n总统计:")
    print(f"  模型类型: {model_type}")
    print(f"  替换模块数: {len(replaced_modules)}")
    print(f"  LoRA 参数总量: {total_lora_params:,}")
    print(f"  原始模型参数: {original_params:,}")
    if original_params > 0:
        print(f"  LoRA 占比: {100 * total_lora_params / original_params:.2f}%")

    return {
        "replaced_modules": replaced_modules,
        "total_lora_params": total_lora_params,
        "original_params": original_params,
        "model_type": model_type
    }


def freeze_base_model(
    model: nn.Module, 
    unfreeze_lora: bool = True,
    unfreeze_downstream: bool = True
):
    """
    Freeze the base encoder parameters while controlling the trainability of
    the LoRA parameters and the downstream task head.

    Args:
        model: a model with LoRA applied
        unfreeze_lora: whether LoRA parameters stay trainable (default: True)
        unfreeze_downstream: whether downstream-task layers stay trainable
            (default: True). Set False to train LoRA only, e.g. for ablations.

    Suggested experiment matrix:
        - Exp 1: unfreeze_lora=True,  unfreeze_downstream=False -> LoRA only
        - Exp 2: unfreeze_lora=False, unfreeze_downstream=True  -> head only
        - Exp 3: unfreeze_lora=True,  unfreeze_downstream=True  -> both (default)
    """
    # Name prefixes identifying the frozen base encoders.
    encoder_prefixes = ('seq_encoder', 'matrix_encoder')

    # Substrings identifying downstream task-head parameters.
    head_keywords = (
        'linear',          # fully-connected layers
        'dropout',         # dropout layers
        'hidden_layer',    # hidden layers
        'hidden_act',      # activations (usually parameter-free; kept for completeness)
        'classifier',      # classifier
        'output',          # output layers
        'seq_pooler',      # sequence pooler
        'matrix_pooler',   # matrix pooler
        'pooler',          # generic pooler
    )

    for pname, param in model.named_parameters():
        in_encoder = pname in encoder_prefixes or any(
            pname.startswith(prefix + '.') for prefix in encoder_prefixes
        )
        in_head = any(keyword in pname for keyword in head_keywords)
        is_lora = ('lora_A' in pname) or ('lora_B' in pname)

        if in_encoder and not is_lora:
            # Base encoder weights (excluding LoRA) are always frozen.
            param.requires_grad = False
        elif in_head:
            param.requires_grad = unfreeze_downstream
        elif is_lora:
            param.requires_grad = unfreeze_lora
        # Anything else (e.g. config-adjacent parameters) stays trainable so
        # nothing is frozen by accident.

    # Belt-and-braces: re-assert LoRA trainability directly on the wrappers.
    if unfreeze_lora:
        for _, module in model.named_modules():
            if isinstance(module, LoRALinear):
                for attr in ('lora_A', 'lora_B'):
                    matrix = getattr(module, attr, None)
                    if matrix is not None:
                        matrix.requires_grad = True

    # Belt-and-braces: re-freeze the downstream head when requested.
    if not unfreeze_downstream:
        for pname, param in model.named_parameters():
            if any(keyword in pname for keyword in head_keywords):
                param.requires_grad = False


def get_lora_parameters(model: nn.Module) -> List[torch.nn.Parameter]:
    """
    Collect every LoRA parameter in the model.

    Args:
        model: a model with LoRA applied

    Returns:
        List[Parameter]: the lora_A and lora_B matrices of every LoRALinear
        wrapper, in module-traversal order.
    """
    return [
        matrix
        for module in model.modules()
        if isinstance(module, LoRALinear)
        for matrix in (module.lora_A, module.lora_B)
    ]


def print_trainable_parameters(model: nn.Module):
    """
    Print a per-parameter listing of trainable parameters plus a summary line.

    Args:
        model: any nn.Module.
    """
    trainable_params = 0
    all_params = 0
    
    for name, param in model.named_parameters():
        all_params += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
            print(f"  [TRAINABLE] {name}: {param.shape} ({param.numel():,} params)")
    
    # Guard the ratio: a parameter-less model previously raised
    # ZeroDivisionError here.
    ratio = 100 * trainable_params / all_params if all_params > 0 else 0.0
    print(f"\n可训练参数: {trainable_params:,} / {all_params:,} ({ratio:.2f}%)")


def load_lora_weights(model: nn.Module, checkpoint_path: str, strict: bool = True):
    """
    Load LoRA weights from a checkpoint into a LoRA-wrapped model.

    Args:
        model: a model with LoRALinear layers already applied
        checkpoint_path: checkpoint path (directory or file)
            - If a directory, these files are tried in order:
              pytorch.pth, pytorch_model.bin
            - If a file, it is loaded directly
        strict: whether every model LoRA parameter must be found with a
            matching shape (default: True)

    Returns:
        Dict[str, Any]: loading statistics with keys
            "loaded_count", "missing_keys", "size_mismatch_keys"

    Raises:
        FileNotFoundError: if the checkpoint file/directory cannot be resolved
        ValueError: if the checkpoint contains no LoRA keys at all
        RuntimeError: if strict=True and keys are missing or shapes mismatch
    """
    import os
    
    # Resolve the checkpoint file path
    if os.path.isdir(checkpoint_path):
        # Try candidate filenames in priority order
        possible_files = ["pytorch.pth", "pytorch_model.bin"]
        checkpoint_file = None
        for filename in possible_files:
            candidate = os.path.join(checkpoint_path, filename)
            if os.path.exists(candidate):
                checkpoint_file = candidate
                break
        
        if checkpoint_file is None:
            raise FileNotFoundError(
                f"在目录 {checkpoint_path} 中未找到检查点文件。"
                f"尝试的文件: {', '.join(possible_files)}"
            )
    else:
        checkpoint_file = checkpoint_path
        if not os.path.exists(checkpoint_file):
            raise FileNotFoundError(f"检查点文件不存在: {checkpoint_file}")
    
    print(f"加载 LoRA 权重: {checkpoint_file}")
    
    # Load the checkpoint on CPU so this works regardless of saved device.
    # NOTE(review): torch.load deserializes arbitrary pickled data — only load
    # trusted checkpoints.
    checkpoint = torch.load(checkpoint_file, map_location="cpu")
    
    # Unwrap common container formats: 'model_state_dict' / 'state_dict'
    if isinstance(checkpoint, dict):
        if 'model_state_dict' in checkpoint:
            state_dict = checkpoint['model_state_dict']
        elif 'state_dict' in checkpoint:
            state_dict = checkpoint['state_dict']
        else:
            state_dict = checkpoint
    else:
        state_dict = checkpoint
    
    # Collect the names of all LoRA parameters present in the model
    # (built for reference; matching below re-derives the same keys)
    model_lora_params = {}
    for name, module in model.named_modules():
        if isinstance(module, LoRALinear):
            if hasattr(module, 'lora_A'):
                model_lora_params[f"{name}.lora_A"] = module.lora_A
            if hasattr(module, 'lora_B'):
                model_lora_params[f"{name}.lora_B"] = module.lora_B
    
    # Filter checkpoint entries down to LoRA tensors
    # (keys containing 'lora_A' or 'lora_B')
    lora_state_dict = {}
    for key, value in state_dict.items():
        # Strip a possible 'module.' prefix left by DDP-wrapped models
        clean_key = key[7:] if key.startswith("module.") else key
        if 'lora_A' in clean_key or 'lora_B' in clean_key:
            lora_state_dict[clean_key] = value
    
    if len(lora_state_dict) == 0:
        raise ValueError(f"在检查点中未找到任何 LoRA 参数（包含 'lora_A' 或 'lora_B' 的键）")
    
    # Match checkpoint tensors to model parameters and copy them in,
    # accumulating diagnostics for the report below.
    loaded_count = 0
    missing_keys = []
    size_mismatch_keys = []
    
    for name, module in model.named_modules():
        if isinstance(module, LoRALinear):
            lora_a_key = f"{name}.lora_A"
            lora_b_key = f"{name}.lora_B"
            
            # Try to load lora_A; shapes must agree exactly
            if lora_a_key in lora_state_dict:
                if module.lora_A.shape == lora_state_dict[lora_a_key].shape:
                    module.lora_A.data.copy_(lora_state_dict[lora_a_key])
                    loaded_count += 1
                else:
                    size_mismatch_keys.append(f"{lora_a_key}: model {module.lora_A.shape} vs checkpoint {lora_state_dict[lora_a_key].shape}")
            elif strict:
                missing_keys.append(lora_a_key)
            
            # Try to load lora_B; shapes must agree exactly
            if lora_b_key in lora_state_dict:
                if module.lora_B.shape == lora_state_dict[lora_b_key].shape:
                    module.lora_B.data.copy_(lora_state_dict[lora_b_key])
                    loaded_count += 1
                else:
                    size_mismatch_keys.append(f"{lora_b_key}: model {module.lora_B.shape} vs checkpoint {lora_state_dict[lora_b_key].shape}")
            elif strict:
                missing_keys.append(lora_b_key)
    
    # Report the outcome (truncated lists keep the log readable)
    print(f"✓ 成功加载 {loaded_count} 个 LoRA 参数")
    if missing_keys:
        print(f"⚠ 缺失的 LoRA 参数 ({len(missing_keys)} 个):")
        for key in missing_keys[:10]:  # show at most the first 10
            print(f"    - {key}")
        if len(missing_keys) > 10:
            print(f"    ... 还有 {len(missing_keys) - 10} 个")
    if size_mismatch_keys:
        print(f"⚠ 形状不匹配的 LoRA 参数 ({len(size_mismatch_keys)} 个):")
        for key in size_mismatch_keys[:10]:
            print(f"    - {key}")
        if len(size_mismatch_keys) > 10:
            print(f"    ... 还有 {len(size_mismatch_keys) - 10} 个")
    
    if strict and (missing_keys or size_mismatch_keys):
        raise RuntimeError(f"LoRA 权重加载失败: 缺失 {len(missing_keys)} 个参数，形状不匹配 {len(size_mismatch_keys)} 个参数")
    
    return {
        "loaded_count": loaded_count,
        "missing_keys": missing_keys,
        "size_mismatch_keys": size_mismatch_keys
    }


# 使用示例和测试
if __name__ == "__main__":
    # 示例: 如何应用到 LucaBase 模型
    print("LoRA 适配器示例")
    print("=" * 60)
    
    # 这里需要实际的模型配置
    # from src.common.luca_base import LucaBase
    # from src.common.model_config import LucaConfig
    # 
    # config = LucaConfig(...)
    # model = LucaBase(config, args)
    # 
    # # 应用 LoRA
    # apply_lora_to_lucaone_model(
    #     model=model.seq_encoder,  # 或 model.matrix_encoder
    #     target_modules=["self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj", 
    #                    "self_attn.out_proj", "fc1", "fc2"],
    #     rank=8,
    #     alpha=16.0
    # )
    # 
    # # 冻结基础模型
    # freeze_base_model(model)
    # 
    # # 打印统计
    # print_trainable_parameters(model)
    
    print("\n✓ LoRA 适配器模块加载成功")

