import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, List, Optional, Union
import yaml
import os
import logging
import math
from torch.nn.init import trunc_normal_

logger = logging.getLogger(__name__)

# ========== Softplus activation (single-layer softplus) ==========
class softSoftplus(nn.Module):
    """Softplus activation with configurable ``beta``/``threshold``.

    Thin wrapper around ``F.softplus`` that stores its parameters as plain
    attributes, mirroring ``nn.Softplus`` behavior.
    """

    def __init__(self, beta=1, threshold=20):
        super().__init__()
        self.beta = beta
        self.threshold = threshold

    def forward(self, x):
        """Apply softplus element-wise with the stored beta/threshold."""
        return F.softplus(x, beta=self.beta, threshold=self.threshold)

class RegionEmbed(nn.Module):
    """Region embedding: a single linear projection of per-region features.

    Maps a ``num_features``-dimensional feature vector (motif + accessibility
    + condition channels, labels excluded) into ``embed_dim`` space, matching
    the original model's embedding layer and its initialization.
    """

    def __init__(self, config: Dict):
        super().__init__()
        # Feature count is driven by the config rather than hard-coded
        # (e.g. 282 motif + 1 accessibility + 74 condition = 357).
        self.num_features = config['num_features']
        self.embed_dim = config['embed_dim']
        assert self.num_features > 0, f"RegionEmbed输入特征数必须大于0，实际为{self.num_features}"

        # Plain linear projection, as in the original model.
        self.embed = nn.Linear(self.num_features, self.embed_dim)

        # Initialization scheme taken from the original model:
        # truncated-normal weights, zero bias.
        trunc_normal_(self.embed.weight, std=0.02)
        if self.embed.bias is not None:
            nn.init.constant_(self.embed.bias, 0)

    def forward(self, x):
        """Project ``x`` of shape (..., num_features) to (..., embed_dim)."""
        assert x.shape[-1] == self.num_features, f"RegionEmbed输入特征维数应为{self.num_features}，实际为{x.shape[-1]}"
        return self.embed(x)

class Attention(nn.Module):
    """Multi-head self-attention with the same layout as the original model.

    A single fused projection produces Q, K and V; scaled dot-product
    attention is applied per head and the heads are re-merged through an
    output projection.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        per_head = dim // num_heads
        # Falls back to 1/sqrt(head_dim) when no explicit scale is given.
        self.scale = qk_scale or per_head ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        """Attend over the token dimension; input/output shape (B, N, C)."""
        batch, n_tok, dim = x.shape
        head_dim = dim // self.num_heads

        fused = self.qkv(x).reshape(batch, n_tok, 3, self.num_heads, head_dim)
        q, k, v = fused.permute(2, 0, 3, 1, 4).unbind(0)

        scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))

        merged = torch.matmul(weights, v).transpose(1, 2).reshape(batch, n_tok, dim)
        return self.proj_drop(self.proj(merged))

class Mlp(nn.Module):
    """Two-layer feed-forward block: linear → activation → linear.

    A single dropout module is reused after the activation and after the
    second linear layer, as in the original model.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Unspecified widths default to the input width.
        hidden = hidden_features if hidden_features else in_features
        out = out_features if out_features else in_features

        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Apply fc1 → act → drop → fc2 → drop."""
        mid = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(mid))

class Block(nn.Module):
    """Pre-norm transformer block: x + attn(LN(x)), then x + mlp(LN(x))."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
                              qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # drop_path is accepted for API compatibility but stochastic depth is
        # disabled here — residual branches pass through an identity.
        self.drop_path = nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)

    def forward(self, x):
        """Two residual sub-layers; shape (B, N, dim) is preserved."""
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))

class GETTransformer(nn.Module):
    """GET transformer encoder: a stack of pre-norm Blocks + final LayerNorm.

    Expected config keys: ``embed_dim``, ``num_heads``, ``num_layers``,
    ``dropout``.
    """

    def __init__(self, config: Dict):
        super().__init__()
        self.embed_dim = config['embed_dim']
        self.num_heads = config['num_heads']
        self.num_layers = config['num_layers']
        self.dropout = config['dropout']

        # Stack of identical transformer blocks.
        self.blocks = nn.ModuleList([
            Block(
                dim=self.embed_dim,
                num_heads=self.num_heads,
                mlp_ratio=4,
                qkv_bias=True,
                drop=self.dropout,
                attn_drop=self.dropout,
                act_layer=nn.GELU,
                norm_layer=nn.LayerNorm,
            )
            for _ in range(self.num_layers)  # loop index was unused
        ])

        self.norm = nn.LayerNorm(self.embed_dim)

        # Initialize all sub-module weights.
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear layers, unit/zero for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            # (dropped a redundant re-check of isinstance(m, nn.Linear) here)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """Run every block in order, then the final LayerNorm; shape preserved."""
        for blk in self.blocks:
            x = blk(x)
        return self.norm(x)

class ExpressionHead(nn.Module):
    """Expression prediction head: one linear layer, as in the original model.

    Weights are drawn from a truncated normal and then shrunk by 1e-3 so the
    head starts out producing near-zero predictions.
    """

    def __init__(self, config: Dict):
        super().__init__()
        self.embed_dim = config['embed_dim']
        self.output_dim = config['output_dim']

        self.head = nn.Linear(self.embed_dim, self.output_dim)

        # Original model's initialization: trunc-normal, then scale down.
        trunc_normal_(self.head.weight, std=0.02)
        with torch.no_grad():
            self.head.weight.mul_(0.001)
            if self.head.bias is not None:
                self.head.bias.mul_(0.001)

    def forward(self, x):
        """Map (..., embed_dim) to (..., output_dim)."""
        return self.head(x)

class LoRALinear(nn.Module):
    """LoRA adapter wrapped around an ``nn.Linear``.

    The wrapped layer's parameters are frozen; only the low-rank factors
    A (rank × in) and B (out × rank) are trainable. The forward pass adds
    ``(alpha / rank) * x A^T B^T`` to the frozen base output. Since B is
    zero-initialized the adapter starts as an exact no-op.
    """

    def __init__(self, base: nn.Linear, rank: int = 4, alpha: int = 16):
        super().__init__()
        self.base = base
        self.rank = rank
        self.alpha = alpha
        self.scaling = alpha / rank

        # Freeze the wrapped layer (weight and bias, if present).
        for param in self.base.parameters():
            param.requires_grad = False

        # Low-rank adapter factors.
        self.lora_A = nn.Parameter(torch.zeros(rank, base.in_features))
        self.lora_B = nn.Parameter(torch.zeros(base.out_features, rank))

        # Standard LoRA init: A random (Kaiming), B zero.
        nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        nn.init.zeros_(self.lora_B)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Frozen base output plus the scaled low-rank update."""
        delta = F.linear(F.linear(x, self.lora_A), self.lora_B)
        return self.base(x) + delta * self.scaling

    @property
    def weight(self):
        # Expose the frozen base weight for nn.Linear compatibility.
        return self.base.weight

    @property
    def bias(self):
        # Expose the frozen base bias for nn.Linear compatibility.
        return self.base.bias

    @property
    def in_features(self):
        # Mirror nn.Linear's interface.
        return self.base.in_features

    @property
    def out_features(self):
        # Mirror nn.Linear's interface.
        return self.base.out_features

class YeastModel(nn.Module):
    def __init__(self, cfg: Dict, use_lora=False, lora_rank=4, lora_alpha=16, lora_layers: Optional[List[str]] = None):
        super().__init__()
        self.use_lora = use_lora
        self.lora_rank = lora_rank
        self.lora_alpha = lora_alpha
        
        # 区域嵌入 - 更接近原模型
        self.region_embed = RegionEmbed(cfg['region_embed'])
        
        # GET Transformer - 更接近原模型
        self.encoder = GETTransformer(cfg['encoder'])
        
        # 表达预测头 - 更接近原模型
        self.head_exp = ExpressionHead(cfg['head_exp'])
        
        # CLS token - 原模型的关键组件
        self.cls_token = nn.Parameter(torch.zeros(1, 1, cfg['encoder']['embed_dim']))
        
        # 损失函数
        self.loss_fn = nn.MSELoss(reduction='mean')
        
        # 初始化权重
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x: Dict[str, torch.Tensor]) -> torch.Tensor:
        # 直接使用motif_features作为357维输入 (motif+accessibility+condition)
        all_feat = x['motif_features']  # (B, 1, 357)
        
        # 区域嵌入
        x = self.region_embed(all_feat)  # (B, 1, 768)
        
        # 添加CLS token，类似原模型
        B, N, C = x.shape
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)  # (B, 2, 768)
        
        # Transformer编码
        x = self.encoder(x)  # (B, 2, 768)
        
        # 移除CLS token
        x = x[:, 1:]  # (B, 1, 768)
        
        # 表达预测
        predictions = self.head_exp(x)  # (B, 1, 1)
        
        # Softplus激活，类似原模型
        predictions = nn.Softplus()(predictions)
        
        return predictions

    def compute_loss(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        # predictions: [batch, seq, 1]，targets: [batch, seq]
        if predictions.shape[-1] == 1:
            predictions = predictions.squeeze(-1)
        return self.loss_fn(predictions, targets)

    def _inject_lora(self, module: nn.Module, rank: int, alpha: int):
        """递归替换 module 中的 nn.Linear 为 LoRA 版本."""
        for name, child in module.named_children():
            if isinstance(child, nn.Linear):
                setattr(module, name, LoRALinear(child, rank, alpha))
            else:
                self._inject_lora(child, rank, alpha)
    
    def inject_lora_adapters(self, lora_layers: Optional[List[str]] = None):
        """注入LoRA适配器"""
        if not self.use_lora:
            return
            
        logger.info(f"注入 LoRA 适配器 (rank={self.lora_rank}, alpha={self.lora_alpha})")
        target_layers = lora_layers if lora_layers else ['encoder', 'head_exp']
        
        for layer_name in target_layers:
            module = getattr(self, layer_name, None)
            if module is not None:
                self._inject_lora(module, self.lora_rank, self.lora_alpha)
                logger.info(f"已为 {layer_name} 注入 LoRA 适配器")
            else:
                logger.warning(f"LoRA 目标层 {layer_name} 不存在，跳过注入")

def create_model(config_path: str):
    """Build a YeastModel from a YAML configuration file.

    Reads the architecture settings from the nested ``model.model`` section
    and the optional LoRA options from the ``training`` section.
    """
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)

    training_cfg = config['training']
    # Architecture config lives under the nested model.model key.
    return YeastModel(
        cfg=config['model']['model'],
        use_lora=training_cfg.get('use_lora', False),
        lora_rank=training_cfg.get('lora_rank', 4),
        lora_alpha=training_cfg.get('lora_alpha', 16),
        lora_layers=training_cfg.get('lora_layers', None),
    )

# The `cfg` argument to YeastModel is expected to have this structure:
# cfg = {
#   'region_embed': {'num_features': 298, 'embed_dim': 768},
#       # NOTE(review): RegionEmbed's own comment mentions 357 features
#       # (282 motif + 1 accessibility + 74 condition) — confirm which
#       # value matches the dataset actually used.
#   'encoder': {'embed_dim': 768, 'num_heads': 12, 'num_layers': 12, 'dropout': 0.1},
#   'head_exp': {'embed_dim': 768, 'output_dim': 1},
#   ...
# }