#!/usr/bin/env python3
"""
LoRA配置类

定义LoRA训练的参数配置
"""

from dataclasses import asdict, dataclass
from typing import List, Optional

from peft import LoraConfig


@dataclass
class LoRAConfig:
    """Configuration for LoRA fine-tuning.

    Groups the LoRA adapter hyperparameters, the training schedule and the
    base-model selection, and converts the adapter-specific subset into a
    PEFT ``LoraConfig`` via :meth:`to_peft_config`.
    """

    # --- Core LoRA adapter parameters ---
    r: int = 16  # LoRA rank (dimension of the low-rank update matrices)
    lora_alpha: int = 32  # LoRA scaling factor
    # Module names to inject adapters into; None -> default set (see __post_init__)
    target_modules: Optional[List[str]] = None
    lora_dropout: float = 0.1  # dropout applied to the LoRA layers
    bias: str = "none"  # which bias parameters to train ("none", "all", "lora_only")
    task_type: str = "FEATURE_EXTRACTION"  # PEFT task type

    # --- Training schedule ---
    learning_rate: float = 1e-4
    batch_size: int = 4
    gradient_accumulation_steps: int = 4
    max_steps: int = 1000
    warmup_steps: int = 100
    save_steps: int = 200
    eval_steps: int = 200
    logging_steps: int = 50

    # --- Model selection ---
    base_model: str = "sam_vit_b"  # identifier of the base model to adapt
    checkpoint_path: Optional[str] = None  # optional path to a base checkpoint

    def __post_init__(self) -> None:
        """Fill in the default target modules when none were supplied."""
        if self.target_modules is None:
            self.target_modules = [
                "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
                "gate_proj", "up_proj", "down_proj",  # MLP projections
            ]

    def to_peft_config(self) -> "LoraConfig":
        """Build a PEFT ``LoraConfig`` from the adapter-specific fields.

        Training-schedule and model-selection fields are intentionally
        excluded — PEFT only consumes the LoRA hyperparameters.
        """
        # Quoted return annotation keeps the peft name lazily resolved,
        # consistent with the forward reference used in from_dict.
        return LoraConfig(
            r=self.r,
            lora_alpha=self.lora_alpha,
            target_modules=self.target_modules,
            lora_dropout=self.lora_dropout,
            bias=self.bias,
            task_type=self.task_type,
        )

    @classmethod
    def from_dict(cls, config_dict: dict) -> "LoRAConfig":
        """Create a config from a plain dict of field values.

        Unknown keys raise ``TypeError`` (standard dataclass behavior),
        which surfaces typos in config files early.
        """
        return cls(**config_dict)

    def to_dict(self) -> dict:
        """Serialize every field to a plain, JSON-friendly dict.

        Uses :func:`dataclasses.asdict`, which also deep-copies the mutable
        ``target_modules`` list so callers cannot mutate internal state.
        """
        return asdict(self)
