#!/usr/bin/env python
# encoding: utf-8
"""
实用的错义突变微调层实现
基于LucaOne嵌入，添加可获取的生物学信息进行微调
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, List, Tuple, Optional
import math


class PracticalMutationFineTuningLayer(nn.Module):
    """
    Practical missense-mutation fine-tuning layer.

    Augments LucaOne sequence embeddings with auxiliary biological signals
    (conservation scores, amino-acid identity, physicochemical-property
    changes) and predicts a single score in [0, 1] for a reference/mutant
    sequence pair.
    """

    def __init__(self, config):
        """
        Args:
            config: object exposing ``hidden_size`` and, optionally,
                ``attention_heads`` / ``dropout``. The optional fields fall
                back to the historical hard-coded values (8 heads, 0.1
                dropout) so older configs keep working.
        """
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        # Keep the layer consistent with PracticalMutationConfig, which
        # already declares these knobs; getattr preserves old behavior for
        # configs that lack them.
        num_heads = getattr(config, "attention_heads", 8)
        dropout = getattr(config, "dropout", 0.1)

        # 1. Mutation-site detector: per-position probability derived from
        #    the encoded reference/difference features.
        self.mutation_detector = nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size // 2),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(self.hidden_size // 2, 1),
            nn.Sigmoid()
        )

        # 2. Conservation encoder (scalar score in [0, 1] per position).
        self.conservation_encoder = nn.Sequential(
            nn.Linear(1, 64),
            nn.GELU(),
            nn.Linear(64, self.hidden_size // 4)
        )

        # 3. Amino-acid type embedding: 20 standard residues + 1 unknown.
        self.aa_embedding = nn.Embedding(21, self.hidden_size // 4)

        # 4. Physicochemical-property encoder (5 features per position).
        self.property_encoder = nn.Sequential(
            nn.Linear(5, 64),
            nn.GELU(),
            nn.Linear(64, self.hidden_size // 4)
        )

        # 5. Sequence-difference encoder over [reference ; mutant-reference].
        self.difference_encoder = nn.Sequential(
            nn.Linear(self.hidden_size * 2, self.hidden_size),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(self.hidden_size, self.hidden_size)
        )

        # 6. Mutation-aware self-attention over the fused features.
        self.mutation_attention = nn.MultiheadAttention(
            embed_dim=self.hidden_size,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True
        )

        # 7. Feature fusion: original embedding + conservation + AA type
        #    + physicochemical properties (three hidden_size//4 blocks).
        self.feature_fusion = nn.Sequential(
            nn.Linear(
                self.hidden_size + self.hidden_size // 4 * 3,
                self.hidden_size
            ),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(self.hidden_size, self.hidden_size)
        )

        # 8. Final classifier head; sigmoid keeps the output in [0, 1].
        self.classifier = nn.Sequential(
            nn.Linear(self.hidden_size, 512),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(512, 128),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(128, 1),
            nn.Sigmoid()
        )

    def forward(self, lucaone_embeddings_a, lucaone_embeddings_b,
                conservation_scores=None, aa_types=None,
                physicochemical_properties=None, mutation_positions=None):
        """
        Forward pass.

        Args:
            lucaone_embeddings_a: [batch, seq_len, hidden_size] reference embeddings.
            lucaone_embeddings_b: [batch, seq_len, hidden_size] mutant embeddings.
            conservation_scores: [batch, seq_len] conservation in [0, 1]; defaults
                to a uniform 0.5 ("medium conservation") when omitted.
            aa_types: [batch, seq_len] residue indices (0-19 standard, 20 unknown);
                defaults to the unknown token when omitted.
            physicochemical_properties: [batch, seq_len, 5] property deltas;
                defaults to zeros (neutral) when omitted.
            mutation_positions: [batch, seq_len] binary mutation mask; when
                omitted it is thresholded from the learned mutation scores.

        Returns:
            dict with 'prediction' [batch, 1], 'attention_weights',
            'mutation_scores', 'mutation_positions', and 'fused_features'.
        """
        batch_size, seq_len, _ = lucaone_embeddings_a.shape
        device = lucaone_embeddings_a.device

        # 1. Encode the reference jointly with the embedding difference.
        # NOTE(review): diff_features only feeds the mutation detector; it is
        # not fused into the classifier input — confirm this is intentional.
        sequence_diff = lucaone_embeddings_b - lucaone_embeddings_a
        diff_features = self.difference_encoder(
            torch.cat([lucaone_embeddings_a, sequence_diff], dim=-1)
        )

        # 2. Per-position mutation score (always computed); a hard mask is
        #    derived only when the caller did not supply ground truth.
        mutation_scores = self.mutation_detector(diff_features).squeeze(-1)
        if mutation_positions is None:
            mutation_positions = (mutation_scores > 0.5).float()

        # 3. Conservation encoding; default is medium conservation (0.5).
        if conservation_scores is None:
            conservation_scores = torch.full(
                (batch_size, seq_len), 0.5, device=device
            )
        conservation_features = self.conservation_encoder(
            conservation_scores.unsqueeze(-1)
        )  # [batch, seq_len, hidden_size//4]

        # 4. Amino-acid identity; default to index 20, the 'X'/unknown token
        #    used by PracticalMutationDataProcessor (index 0 would mean 'A').
        if aa_types is None:
            aa_types = torch.full(
                (batch_size, seq_len), 20, dtype=torch.long, device=device
            )
        aa_features = self.aa_embedding(aa_types)  # [batch, seq_len, hidden_size//4]

        # 5. Physicochemical properties; default is a neutral (zero) change.
        if physicochemical_properties is None:
            physicochemical_properties = torch.zeros(
                batch_size, seq_len, 5, device=device
            )
        property_features = self.property_encoder(physicochemical_properties)

        # 6. Fuse the original embedding with the auxiliary features.
        combined_features = torch.cat([
            lucaone_embeddings_a,   # original embedding
            conservation_features,  # conservation
            aa_features,            # amino-acid type
            property_features       # physicochemical properties
        ], dim=-1)
        fused_features = self.feature_fusion(combined_features)

        # 7. Mutation-aware self-attention (query = key = value).
        attended_features, attention_weights = self.mutation_attention(
            fused_features, fused_features, fused_features
        )

        # 8. Mean-pool over the sequence, then classify.
        pooled_features = attended_features.mean(dim=1)  # [batch, hidden_size]
        prediction = self.classifier(pooled_features)

        return {
            'prediction': prediction,
            'attention_weights': attention_weights,
            'mutation_scores': mutation_scores,
            'mutation_positions': mutation_positions,
            'fused_features': fused_features
        }


class PracticalMutationLoss(nn.Module):
    """Loss for practical mutation prediction.

    Combines a BCE classification term with an MSE regularizer that pushes
    attention mass onto the known mutation positions.
    """

    def __init__(self, alpha=0.7, beta=0.3):
        super().__init__()
        self.alpha = alpha  # weight of the main classification loss
        self.beta = beta    # weight of the attention regularization term
        self.bce_loss = nn.BCELoss()
        self.mse_loss = nn.MSELoss()

    def forward(self, predictions, targets, attention_weights, mutation_positions):
        """
        Compute the combined loss.

        Args:
            predictions: [batch, 1] predicted probabilities.
            targets: [batch, 1] ground-truth labels.
            attention_weights: [batch, seq_len, seq_len] attention matrix.
            mutation_positions: [batch, seq_len] binary mutation mask.
        """
        # Main classification term.
        classification_term = self.bce_loss(predictions, targets)

        # Attention regularization: the total attention each query position
        # pays to mutation sites should match the mutation mask itself.
        # Skipped (0.0) when the batch contains no mutated positions.
        regularization_term = 0.0
        if mutation_positions.sum() > 0:
            masked_attention = attention_weights * mutation_positions.unsqueeze(1)
            attention_on_mutations = masked_attention.sum(dim=2)  # [batch, seq_len]
            regularization_term = self.mse_loss(
                attention_on_mutations, mutation_positions.float()
            )

        # Weighted total.
        return self.alpha * classification_term + self.beta * regularization_term


class PracticalMutationDataProcessor:
    """Practical mutation data processor.

    Converts a reference/mutant sequence pair into the tensors consumed by
    PracticalMutationFineTuningLayer: mutation mask, amino-acid indices,
    conservation scores, and per-position physicochemical-property changes.
    """

    def __init__(self):
        # Amino acid -> index map: 20 standard residues plus 'X' (unknown).
        self.aa_to_idx = {
            'A': 0, 'R': 1, 'N': 2, 'D': 3, 'C': 4, 'Q': 5, 'E': 6, 'G': 7,
            'H': 8, 'I': 9, 'L': 10, 'K': 11, 'M': 12, 'F': 13, 'P': 14,
            'S': 15, 'T': 16, 'W': 17, 'Y': 18, 'V': 19, 'X': 20  # X = unknown
        }

    def process_sequence_pair(self, seq_a, seq_b, conservation_scores=None):
        """
        Process a sequence pair.

        Args:
            seq_a: reference sequence (one-letter residue string).
            seq_b: mutant sequence; must have the same length as ``seq_a``
                (missense mutations are substitutions, not indels).
            conservation_scores: optional per-position scores in [0, 1];
                defaults to a uniform 0.5 ("medium conservation").

        Returns:
            dict of tensors, each with a leading batch dimension of 1:
            'mutation_positions' [1, L], 'aa_types' [1, L],
            'conservation_scores' [1, L], 'physicochemical_properties' [1, L, 5].

        Raises:
            ValueError: if the two sequences differ in length (zip would
                otherwise silently truncate and misalign the tensors).
        """
        if len(seq_a) != len(seq_b):
            raise ValueError(
                f"sequence length mismatch: {len(seq_a)} vs {len(seq_b)}"
            )
        seq_len = len(seq_a)

        # 1. Mutation mask: 1.0 wherever the residues differ.
        mutation_positions = torch.tensor(
            [1.0 if aa_a != aa_b else 0.0 for aa_a, aa_b in zip(seq_a, seq_b)],
            dtype=torch.float32
        )

        # 2. Amino-acid indices of the reference sequence; unmapped
        #    characters fall back to 20 ('X', unknown).
        #    (The mutant-sequence indices were previously computed here but
        #    never used, so that dead code was removed.)
        aa_types_a = torch.tensor([self.aa_to_idx.get(aa, 20) for aa in seq_a])

        # 3. Conservation scores, forced to float32 so the downstream linear
        #    layer accepts them even when the caller passes a list of ints.
        if conservation_scores is None:
            conservation_scores = torch.full((seq_len,), 0.5)  # medium conservation
        else:
            conservation_scores = torch.tensor(
                conservation_scores, dtype=torch.float32
            )

        # 4. Per-position physicochemical-property deltas (mutant - reference).
        physicochemical_properties = self._calculate_sequence_properties(seq_a, seq_b)

        return {
            'mutation_positions': mutation_positions.unsqueeze(0),  # [1, seq_len]
            'aa_types': aa_types_a.unsqueeze(0),  # [1, seq_len]
            'conservation_scores': conservation_scores.unsqueeze(0),  # [1, seq_len]
            'physicochemical_properties': physicochemical_properties.unsqueeze(0)  # [1, seq_len, 5]
        }

    def _calculate_sequence_properties(self, seq_a, seq_b):
        """Return a [seq_len, 5] tensor of property deltas (mutant - reference).

        Columns: [hydrophobicity, charge, size, polarity, aromaticity].
        Residues missing from the table use a neutral default.
        """
        properties = {
            'A': [0.0, 1, 1, 0, 0],  # [hydrophobicity, charge, size, polarity, aromaticity]
            'R': [0.5, 0, 4, 1, 0],  # positively charged
            'N': [0.2, 1, 2, 1, 0],  # polar
            'D': [0.3, 2, 2, 1, 0],  # negatively charged
            'C': [0.8, 1, 2, 0, 0],  # hydrophobic
            'Q': [0.2, 1, 3, 1, 0],  # polar
            'E': [0.3, 2, 3, 1, 0],  # negatively charged
            'G': [0.0, 1, 0, 0, 0],  # smallest
            'H': [0.5, 0, 3, 1, 1],  # aromatic
            'I': [1.0, 1, 3, 0, 0],  # hydrophobic
            'L': [1.0, 1, 3, 0, 0],  # hydrophobic
            'K': [0.5, 0, 3, 1, 0],  # positively charged
            'M': [0.8, 1, 3, 0, 0],  # hydrophobic
            'F': [0.9, 1, 4, 0, 1],  # aromatic
            'P': [0.3, 1, 2, 0, 0],  # special (imino acid)
            'S': [0.2, 1, 1, 1, 0],  # polar
            'T': [0.3, 1, 2, 1, 0],  # polar
            'W': [0.9, 1, 4, 0, 1],  # aromatic
            'Y': [0.7, 1, 4, 1, 1],  # aromatic
            'V': [0.8, 1, 2, 0, 0],  # hydrophobic
        }
        neutral = [0.0, 1, 2, 0, 0]  # fallback for unmapped residues

        deltas = [
            [b - a for a, b in zip(properties.get(aa_a, neutral),
                                   properties.get(aa_b, neutral))]
            for aa_a, aa_b in zip(seq_a, seq_b)
        ]
        return torch.tensor(deltas, dtype=torch.float32)


# Configuration class
class PracticalMutationConfig:
    """Configuration for practical mutation prediction.

    All keyword arguments default to the original hard-coded settings, so
    the previous no-argument construction keeps working; callers can now
    override individual values without subclassing.
    """

    def __init__(self, hidden_size=1024, learning_rate=1e-4,
                 weight_decay=1e-5, dropout=0.1, attention_heads=8):
        self.hidden_size = hidden_size          # LucaOne embedding width
        self.learning_rate = learning_rate      # optimizer learning rate
        self.weight_decay = weight_decay        # optimizer weight decay
        self.dropout = dropout                  # dropout probability
        self.attention_heads = attention_heads  # heads for MultiheadAttention


# Usage example
if __name__ == "__main__":
    # Build configuration, model, and data processor.
    cfg = PracticalMutationConfig()
    net = PracticalMutationFineTuningLayer(cfg)
    processor = PracticalMutationDataProcessor()

    # Demo pair: identical sequences except an R->H substitution at index 10.
    reference = "MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLAGG"
    variant = reference[:10] + "H" + reference[11:]

    # Derive auxiliary feature tensors from the sequence pair.
    features = processor.process_sequence_pair(reference, variant)

    # Random stand-ins for real LucaOne embeddings.
    n_batch, n_pos = 1, len(reference)
    emb_ref = torch.randn(n_batch, n_pos, cfg.hidden_size)
    emb_var = torch.randn(n_batch, n_pos, cfg.hidden_size)

    # Run the model on the pair plus auxiliary features.
    result = net(
        emb_ref, emb_var,
        conservation_scores=features['conservation_scores'],
        aa_types=features['aa_types'],
        physicochemical_properties=features['physicochemical_properties'],
        mutation_positions=features['mutation_positions']
    )

    print(f"预测结果形状: {result['prediction'].shape}")
    print(f"注意力权重形状: {result['attention_weights'].shape}")
    print(f"突变分数形状: {result['mutation_scores'].shape}")
    print(f"突变位点: {result['mutation_positions'].sum().item()}")