#!/usr/bin/env python3
"""
LoRA工具类 - 实现LoRA适配器
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any, Optional, List
import math

class LoRALinear(nn.Module):
    """Linear layer augmented with a low-rank (LoRA) adapter.

    Computes ``W x + b + (alpha / rank) * B A dropout(x)`` where ``A`` and
    ``B`` are the trainable low-rank factors. The frozen base weight/bias are
    registered as buffers so they follow ``.to()`` / ``.cuda()`` moves
    together with the LoRA parameters (previously they were plain attributes
    and stayed on their original device).

    Args:
        in_features: Input dimension of the wrapped linear map.
        out_features: Output dimension of the wrapped linear map.
        rank: Rank of the low-rank decomposition.
        alpha: Scaling numerator; the effective scale is ``alpha / rank``.
        dropout: Dropout probability applied to the LoRA branch input.
        bias: Accepted for signature compatibility with ``nn.Linear``-style
            construction; the frozen bias itself is supplied through
            ``set_original_weight``.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int = 16,
        alpha: float = 32.0,
        dropout: float = 0.1,
        bias: bool = False
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.rank = rank
        self.alpha = alpha
        self.scaling = alpha / rank

        # LoRA factors: A gets Kaiming-uniform init (standard LoRA scheme,
        # and the reason `math` is imported); B starts at zero so the adapter
        # is an exact no-op before any training step.
        self.lora_A = nn.Parameter(torch.empty(rank, in_features))
        nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        self.lora_B = nn.Parameter(torch.zeros(out_features, rank))
        self.dropout = nn.Dropout(dropout)

        # Frozen base weights as buffers (not parameters): moved by .to(),
        # never touched by the optimizer.
        self.register_buffer("original_weight", None)
        self.register_buffer("original_bias", None)

    def forward(self, x):
        """Apply the base linear map (if attached) plus the scaled LoRA delta."""
        lora_out = self.dropout(x) @ self.lora_A.T @ self.lora_B.T
        if self.original_weight is None:
            # No base weight attached: the adapter acts alone.
            return lora_out * self.scaling
        original_out = F.linear(x, self.original_weight, self.original_bias)
        return original_out + lora_out * self.scaling

    def set_original_weight(self, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
        """Attach (and detach from autograd) the base layer's weight and bias."""
        self.original_weight = weight.detach()
        self.original_bias = bias.detach() if bias is not None else None

class LoRAConfig:
    """Hyper-parameter container consumed by ``apply_lora_to_model``.

    Attributes:
        rank: Rank of the low-rank decomposition.
        alpha: Scaling numerator (effective scale is ``alpha / rank``).
        dropout: Dropout probability on the adapter branch.
        target_modules: Name fragments of modules to patch; a sensible
            default set for transformer projections is used when empty.
        bias: Bias handling mode: "none", "all", or "lora_only".
    """

    def __init__(
        self,
        rank: int = 16,
        alpha: float = 32.0,
        dropout: float = 0.1,
        target_modules: List[str] = None,
        bias: str = "none"  # "none", "all", "lora_only"
    ):
        self.rank = rank
        self.alpha = alpha
        self.dropout = dropout
        if not target_modules:
            # Default to the usual transformer attention/MLP projections.
            target_modules = [
                "q_proj", "k_proj", "v_proj", "o_proj",
                "gate_proj", "up_proj", "down_proj",
            ]
        self.target_modules = target_modules
        self.bias = bias

def apply_lora_to_model(model, lora_config: LoRAConfig):
    """Replace targeted ``nn.Linear`` submodules with ``LoRALinear`` adapters.

    A layer is targeted when any entry of ``lora_config.target_modules`` is a
    substring of its qualified module name. The original weight/bias are
    frozen inside the adapter; the module tree is rewritten in place.

    Args:
        model: Any ``nn.Module`` whose submodules should be patched.
        lora_config: Hyper-parameters for the injected adapters.

    Returns:
        List of qualified names of the replaced layers.
    """
    # Bug fix: collect the matches FIRST. The original code replaced modules
    # while still iterating named_modules(), i.e. mutated the module tree
    # under a live generator, which can skip or revisit entries.
    targets = [
        (name, module)
        for name, module in model.named_modules()
        if isinstance(module, nn.Linear)
        and any(t in name for t in lora_config.target_modules)
    ]

    applied_layers = []
    for name, module in targets:
        lora_layer = LoRALinear(
            in_features=module.in_features,
            out_features=module.out_features,
            rank=lora_config.rank,
            alpha=lora_config.alpha,
            dropout=lora_config.dropout,
            bias=module.bias is not None
        )
        # Freeze the existing weights inside the adapter.
        lora_layer.set_original_weight(module.weight, module.bias)

        # Re-attach the adapter under the same qualified name.
        parent_name, _, child_name = name.rpartition(".")
        parent_module = model.get_submodule(parent_name) if parent_name else model
        setattr(parent_module, child_name, lora_layer)

        applied_layers.append(name)

    print(f"已应用LoRA到 {len(applied_layers)} 个层: {applied_layers}")
    return applied_layers

def save_lora_weights(model, save_path: str):
    """Serialize only the LoRA parameters of *model* to *save_path*.

    A parameter belongs to LoRA when its qualified name contains 'lora';
    tensors are moved to CPU before saving.
    """
    lora_state_dict = {
        name: param.cpu()
        for name, param in model.named_parameters()
        if 'lora' in name
    }
    torch.save(lora_state_dict, save_path)
    print(f"LoRA权重已保存到: {save_path}")

def load_lora_weights(model, load_path: str):
    """Copy saved LoRA tensors into same-named parameters of *model*.

    Entries in the checkpoint with no matching parameter are ignored.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    lora_state_dict = torch.load(load_path, map_location='cpu')

    # Copy by name; iterate the checkpoint against a name->param lookup.
    params = dict(model.named_parameters())
    for name, tensor in lora_state_dict.items():
        if name in params:
            params[name].data.copy_(tensor)

    print(f"LoRA权重已从 {load_path} 加载")

def merge_lora_weights(model):
    """Fold each adapter's low-rank update into its frozen base weight.

    After merging, the LoRA factors are zeroed so subsequent forward passes
    rely solely on the merged weight and the update is not applied twice.
    Adapters without an attached base weight are skipped.
    """
    for _, module in model.named_modules():
        if not isinstance(module, LoRALinear):
            continue
        if module.original_weight is None:
            continue

        # Effective update: scaling * B @ A — same shape as the base weight.
        delta = module.lora_B @ module.lora_A * module.scaling
        module.original_weight = module.original_weight + delta

        # Reset the adapter branch to a no-op.
        module.lora_A.data.zero_()
        module.lora_B.data.zero_()

    print("LoRA权重已合并到原始权重")

def get_lora_parameters(model):
    """Return the LoRA parameters of *model* (names containing 'lora')."""
    return [
        param
        for name, param in model.named_parameters()
        if 'lora' in name
    ]

def count_lora_parameters(model):
    """Count total vs. LoRA parameters in *model*.

    Returns:
        Dict with 'total_params', 'lora_params' and 'lora_ratio'
        (ratio is 0 when the model has no parameters).
    """
    total_params = sum(p.numel() for p in model.parameters())
    # Bug fix: identify LoRA tensors by their qualified NAME. The previous
    # check ('lora' in str(p)) inspected the tensor's repr (values), so it
    # always reported 0 LoRA parameters.
    lora_params = sum(
        p.numel() for name, p in model.named_parameters() if 'lora' in name
    )

    return {
        'total_params': total_params,
        'lora_params': lora_params,
        'lora_ratio': lora_params / total_params if total_params > 0 else 0
    }

# 测试函数
def test_lora():
    """Smoke-test LoRA injection on a tiny two-layer MLP."""
    # Small model with two Linear layers to patch.
    model = nn.Sequential(
        nn.Linear(100, 50),
        nn.ReLU(),
        nn.Linear(50, 10)
    )

    # Target the Linear submodules, named "0" and "2" inside the Sequential.
    lora_config = LoRAConfig(
        rank=8,
        alpha=16.0,
        target_modules=["0", "2"]
    )

    applied_layers = apply_lora_to_model(model, lora_config)

    # A forward pass must still work after the swap.
    x = torch.randn(32, 100)
    y = model(x)

    print(f"输入形状: {x.shape}")
    print(f"输出形状: {y.shape}")
    print(f"应用LoRA的层: {applied_layers}")

    # Report parameter counts for the patched model.
    param_stats = count_lora_parameters(model)
    print(f"参数统计: {param_stats}")

if __name__ == "__main__":
    test_lora()
