import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import stats

class MultiOmicsCorrelationModule(nn.Module):
    """Pairwise correlation analysis over multi-omics features.

    Projects each position's genomic feature vector into a shared embedding
    space, then scores every ordered pair of positions with a small MLP whose
    sigmoid output lies in (0, 1).
    """

    def __init__(self, feature_dim, embed_dim=64):
        super().__init__()
        self.feature_dim = feature_dim

        # Per-position projection into the shared embedding space.
        self.feature_projector = nn.Sequential(
            nn.Linear(feature_dim, embed_dim),
            nn.ReLU(),
            nn.LayerNorm(embed_dim))

        # Pair scorer: concatenated (row, col) embeddings -> score in (0, 1).
        self.correlation_net = nn.Sequential(
            nn.Linear(embed_dim * 2, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
            nn.Sigmoid())

    def forward(self, genomic_features):
        """Score all ordered position pairs.

        Args:
            genomic_features: genomic features, [batch, seq_len, feature_dim].

        Returns:
            Correlation score matrix of shape [batch, seq_len, seq_len].
        """
        n_bins = genomic_features.size(1)

        # [batch, seq_len, embed_dim]
        embedded = self.feature_projector(genomic_features)

        # Broadcast the embeddings into every (i, j) pair: `row` carries the
        # embedding of position i, `col` the embedding of position j.
        row = embedded.unsqueeze(2).expand(-1, -1, n_bins, -1)
        col = embedded.unsqueeze(1).expand(-1, n_bins, -1, -1)
        pair_repr = torch.cat((row, col), dim=-1)  # [batch, n, n, 2*embed_dim]

        return self.correlation_net(pair_repr).squeeze(-1)

class DualChannelAttention(nn.Module):
    """Dual-channel attention over genomic bins.

    Channel 1 (saliency): scores each (query, key, relative-position) triple
    per head with a small MLP, restricts attention to a local window of
    ``local_radius`` bins, and keeps only the top-k candidates per query.
    Channel 2 (correlation): scores (query, key) pairs on the full embedding
    and multiplies in an externally supplied multi-omics prior.
    A learned sigmoid gate mixes the two score maps before the softmax.

    Fixes relative to the previous revision:
      * ``forward`` called ``self.positional_encoding(positions)`` but the
        attribute was never defined (AttributeError); a relative-position
        encoder is now created in ``__init__``.
      * ``fusion_gate`` declared an input width of ``2 * head_dim`` but was
        fed exactly two scalar scores per position; its first layer now
        takes 2 inputs.
      * ``correlation_net`` declared ``head_dim * 2`` inputs but was fed
        ``embed_dim * 2`` features; the width is corrected.
      * ``topk`` k is clamped so it never exceeds ``seq_len``.
      * Fusion previously gated ``-inf``-masked scores, which can yield
        NaN (0 * -inf); finite scores are fused first, masking is applied
        afterwards.
    """

    def __init__(self, embed_dim, num_heads, local_radius=30, top_k=10, dropout=0.1):
        super().__init__()
        assert embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.local_radius = local_radius
        self.top_k = top_k

        # Relative-position encoder: scalar genomic distance -> embed_dim
        # vector (replaces the undefined self.positional_encoding).
        self.rel_pos_encoder = nn.Sequential(
            nn.Linear(1, embed_dim),
            nn.ReLU(),
            nn.Linear(embed_dim, embed_dim))

        # Saliency channel: (q | k | rel_pos) per head -> scalar score.
        self.saliency_net = nn.Sequential(
            nn.Linear(3 * self.head_dim, 4 * self.head_dim),
            nn.ReLU(),
            nn.Linear(4 * self.head_dim, 1))

        # Correlation channel: (q | k) on the full embedding -> scalar score.
        # Width fixed: forward feeds embed_dim * 2 features, not head_dim * 2.
        self.correlation_net = nn.Sequential(
            nn.Linear(embed_dim * 2, 128),
            nn.ReLU(),
            nn.Linear(128, 1))

        # Channel-fusion gate: the pair of channel scores -> weight in (0, 1).
        # Width fixed: forward feeds exactly 2 scalars per position.
        self.fusion_gate = nn.Sequential(
            nn.Linear(2, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
            nn.Sigmoid())

        # Standard multi-head projections.
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, positions, omics_correlation):
        """
        Args:
            x: input features [batch_size, seq_len, embed_dim]
            positions: pairwise distance matrix [batch_size, seq_len, seq_len]
            omics_correlation: multi-omics correlation prior
                [batch_size, seq_len, seq_len]

        Returns:
            Tuple of (output [batch_size, seq_len, embed_dim],
                      attention weights [batch_size, num_heads, seq_len, seq_len],
                      gate values [batch_size, num_heads, seq_len, seq_len]).
        """
        batch_size, seq_len, _ = x.shape

        # Project and split into heads: [b, num_heads, seq_len, head_dim].
        q = self.q_proj(x).view(batch_size, seq_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        k = self.k_proj(x).view(batch_size, seq_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        v = self.v_proj(x).view(batch_size, seq_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)

        # === Channel 1: saliency-based attention ===
        # Encode relative positions and split per head.
        rel = self.rel_pos_encoder(positions.to(x.dtype).unsqueeze(-1))  # [b, s, s, embed_dim]
        rel = rel.view(batch_size, seq_len, seq_len, self.num_heads, self.head_dim)
        rel = rel.permute(0, 3, 1, 2, 4)  # [b, nh, s, s, head_dim]

        # Concatenate query, key, and relative-position features per pair.
        feature_concat = torch.cat([
            q.unsqueeze(3).expand(-1, -1, -1, seq_len, -1),
            k.unsqueeze(2).expand(-1, -1, seq_len, -1, -1),
            rel,
        ], dim=-1)  # [b, nh, s, s, 3*head_dim]
        saliency_scores = self.saliency_net(feature_concat).squeeze(-1)  # [b, nh, s, s]

        # Local-window constraint.
        position_mask = (torch.abs(positions) <= self.local_radius).unsqueeze(1)  # [b, 1, s, s]
        masked_saliency = saliency_scores.masked_fill(~position_mask, float('-inf'))

        # Top-k salient interactions; k clamped so topk never exceeds seq_len.
        k_sel = min(self.top_k, 2 * self.local_radius + 1, seq_len)
        topk_indices = torch.topk(masked_saliency, k=k_sel, dim=-1).indices
        keep = torch.zeros_like(saliency_scores, dtype=torch.bool)
        keep.scatter_(-1, topk_indices, True)
        # When fewer than k candidates lie inside the window, topk can select
        # out-of-window (-inf) positions; intersect with the window mask. The
        # diagonal is always in the window, so every row keeps >= 1 entry and
        # the softmax below never sees an all-(-inf) row.
        keep = keep & position_mask

        # === Channel 2: multi-omics correlation attention ===
        q_flat = q.permute(0, 2, 1, 3).reshape(batch_size, seq_len, -1)  # [b, s, embed_dim]
        k_flat = k.permute(0, 2, 1, 3).reshape(batch_size, seq_len, -1)
        corr_features = torch.cat([
            q_flat.unsqueeze(2).expand(-1, -1, seq_len, -1),
            k_flat.unsqueeze(1).expand(-1, seq_len, -1, -1),
        ], dim=-1)  # [b, s, s, 2*embed_dim]
        correlation_attn = self.correlation_net(corr_features).squeeze(-1)  # [b, s, s]
        # Apply the multi-omics correlation score as a multiplicative prior.
        correlation_attn = correlation_attn * omics_correlation
        # Broadcast to every head.
        correlation_attn = correlation_attn.unsqueeze(1).expand(-1, self.num_heads, -1, -1)

        # === Channel fusion ===
        # Gate on the two FINITE channel scores (masking happens afterwards,
        # so no 0 * -inf = NaN can occur).
        gate_in = torch.stack([saliency_scores, correlation_attn], dim=-1)  # [b, nh, s, s, 2]
        gate_values = self.fusion_gate(gate_in).squeeze(-1)  # [b, nh, s, s]
        fused_attn = gate_values * saliency_scores + (1 - gate_values) * correlation_attn

        # Apply the sparsity constraint after fusion.
        fused_attn = fused_attn.masked_fill(~keep, float('-inf'))

        # Attention weights and weighted sum of values.
        attn_weights = self.dropout(F.softmax(fused_attn, dim=-1))
        attn_output = torch.matmul(attn_weights, v)

        # Merge heads and project out.
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
        attn_output = attn_output.view(batch_size, seq_len, self.embed_dim)
        return self.out_proj(attn_output), attn_weights, gate_values

class EnhancedHiCLoopPredictor(nn.Module):
    """Enhanced Hi-C loop prediction model.

    Pipeline: (1) per-sample statistical significance detection on the raw
    Hi-C matrix, (2) multi-omics pairwise correlation scoring, (3) genomic
    feature encoding + positional encoding, (4) a stack of pre-norm
    dual-channel attention layers with residual connections, (5) a pairwise
    loop-probability head whose output is zeroed outside significant regions.

    NOTE(review): depends on `PositionalEncoding` and
    `SignificantInteractionDetector`, which are defined elsewhere in the
    project and are not visible here — their output shapes below are inferred,
    not verified.
    """
    def __init__(self, feature_dim, embed_dim=256, num_heads=8, num_layers=4, 
                 local_radius=30, p_threshold=0.001, dropout=0.1):
        super().__init__()
        self.local_radius = local_radius
        self.p_threshold = p_threshold
        
        # Multi-omics correlation module (fixed 64-dim internal embedding).
        self.omics_correlation = MultiOmicsCorrelationModule(
            feature_dim=feature_dim, 
            embed_dim=64
        )
        
        # Genomic feature encoder: feature_dim -> embed_dim.
        self.feature_encoder = nn.Sequential(
            nn.Linear(feature_dim, embed_dim * 2),
            nn.ReLU(),
            nn.LayerNorm(embed_dim * 2),
            nn.Linear(embed_dim * 2, embed_dim),
            nn.ReLU()
        )
        
        # Positional encoding (external class; returns (x, rel_pos_enc) in forward).
        self.positional_encoding = PositionalEncoding(embed_dim)
        
        # Stack of dual-channel attention layers.
        self.attention_layers = nn.ModuleList([
            DualChannelAttention(embed_dim, num_heads, local_radius, top_k=10, dropout=dropout)
            for _ in range(num_layers)
        ])
        
        # One pre-attention LayerNorm per attention layer.
        self.layer_norms = nn.ModuleList([
            nn.LayerNorm(embed_dim) for _ in range(num_layers)
        ])
        
        # Loop prediction head over pairwise features.
        # NOTE(review): declared input width is embed_dim * 3, but forward()
        # concatenates [anchor1 | anchor2 | positional_feat] whose last dim
        # appears to be embed_dim * 2 + 1 (positional_feat is mean-reduced to a
        # single channel). Confirm the shape of rel_pos_enc returned by
        # PositionalEncoding — as read, this looks like a size mismatch.
        self.loop_head = nn.Sequential(
            nn.Linear(embed_dim * 3, embed_dim * 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(embed_dim * 2, 1),
            nn.Sigmoid()
        )
        
        # Statistical significance detector (external class).
        self.sig_detector = SignificantInteractionDetector(local_radius, p_threshold)

    def forward(self, hic_matrix, genomic_features):
        """Predict loop probabilities.

        Args:
            hic_matrix: Hi-C contact matrix [batch_size, seq_len, seq_len].
            genomic_features: genomic features [batch_size, seq_len, feature_dim].

        Returns:
            Tuple of (loop_probs [batch, seq_len, seq_len], significance_mask,
            omics_correlation, list of per-layer gate values).
        """
        batch_size, seq_len, _ = hic_matrix.shape
        
        # === 1. Significance detection ===
        # Runs per-sample on CPU/NumPy (sig_detector is presumably
        # scipy-based); this forces a device round-trip for every batch item.
        significance_masks = []
        for i in range(batch_size):
            mask = self.sig_detector.detect(hic_matrix[i].cpu().numpy())
            significance_masks.append(torch.tensor(mask, dtype=torch.float32))
        significance_mask = torch.stack(significance_masks).to(hic_matrix.device)
        
        # === 2. Multi-omics correlation analysis ===
        omics_correlation = self.omics_correlation(genomic_features)
        
        # === 3. Feature enhancement ===
        # Encode genomic features into the attention embedding space.
        x = self.feature_encoder(genomic_features)
        
        # Pairwise absolute-distance matrix |i - j|, one copy per batch item.
        positions = torch.abs(torch.arange(seq_len).unsqueeze(0) - torch.arange(seq_len).unsqueeze(1))
        positions = positions.unsqueeze(0).expand(batch_size, -1, -1).to(hic_matrix.device)
        
        # Apply positional encoding; rel_pos_enc is reused by the loop head.
        x, rel_pos_enc = self.positional_encoding(x, positions)
        
        # === 4. Dual-channel attention stack (pre-norm + residual) ===
        gate_values_list = []
        for i, attn_layer in enumerate(self.attention_layers):
            residual = x
            x = self.layer_norms[i](x)
            attn_out, attn_weights, gate_values = attn_layer(x, positions, omics_correlation)
            x = residual + attn_out
            gate_values_list.append(gate_values)
        
        # === 5. Loop prediction ===
        # Pairwise features: anchor embeddings for positions i and j plus a
        # scalar positional channel (rel_pos_enc averaged over its last dim).
        anchor1 = x.unsqueeze(2).expand(-1, -1, seq_len, -1)
        anchor2 = x.unsqueeze(1).expand(-1, seq_len, -1, -1)
        positional_feat = rel_pos_enc.mean(dim=-1).unsqueeze(-1)
        pair_features = torch.cat([anchor1, anchor2, positional_feat], dim=-1)
        
        # Predict loop probabilities; zero out non-significant regions.
        loop_probs = self.loop_head(pair_features).squeeze(-1)
        loop_probs = loop_probs * significance_mask  # keep predictions only in significant regions
        
        return loop_probs, significance_mask, omics_correlation, gate_values_list