import torch
from torch import nn
from torch.nn import functional as F
from muta.model.performer import Performer
from muta.model.modules import MLPBlock, _get_activation_fn, AbsolutePositionalEncoding 
from typing import Dict, Any, Optional 

class PerformerMutationLM(nn.Module):
    """
    Performer model for the DNA mutation pretraining task (Masked Mutation Modeling).

    Each mutation feature (pentanucleotide context, chromosome, exon strand,
    sequencing method, relative chromosomal position, optionally absolute genomic
    position and Evo2 embeddings) is embedded and passed through its own MLP
    encoder. The per-modality outputs are fused with learnable softmax-normalized
    weights, a CLS token is prepended, and the sequence is run through a Performer
    encoder. The LM head predicts the pentanucleotide-context vocabulary at every
    sequence position; the CLS output is returned as a sample-level embedding.
    """
    def __init__(self,
                    num_pentanucleotide_contexts: int,
                    num_chromosome_ids: int,
                    num_exon_strand_ids: int,
                    num_sequencing_methods: int,
                    num_relative_chrom_positions: int,
                    dim: int = 64,
                    depth: int = 3,
                    heads: int = 4,
                    dim_head: int = 16,
                    evo2_embedding_dim: int = 0,       # total dim of the combined Evo2 embedding; 0 disables the modality
                    mlp_hidden_dim_mult: int = 2,
                    mlp_n_blocks_discrete: int = 1,
                    mlp_n_blocks_continuous: int = 2,
                    mlp_activation: str = "gelu",
                    mlp_norm_type: str = "rmsnorm",
                    mlp_bias: bool = False,
                    use_absolute_genomic_pos_emb: bool = True,
                    max_genomic_position: int = 300_000_000,
                 **kwargs):
        """
        Args:
            num_pentanucleotide_contexts: size of the pentanucleotide vocabulary
                (two extra ids are reserved — presumably MASK/PAD; TODO confirm
                against the tokenizer).
            num_chromosome_ids / num_exon_strand_ids / num_sequencing_methods /
                num_relative_chrom_positions: vocabulary sizes for the
                corresponding categorical features.
            dim: shared embedding / model dimension.
            depth, heads, dim_head: Performer encoder hyper-parameters.
            evo2_embedding_dim: input dim of precomputed Evo2 embeddings; 0
                disables the Evo2 branch entirely.
            mlp_*: per-modality MLP encoder hyper-parameters.
            use_absolute_genomic_pos_emb: whether to add an absolute genomic
                positional-encoding modality.
            max_genomic_position: upper bound for absolute genomic positions.
            **kwargs: forwarded to `Performer`; `attn_dropout` / `ff_dropout`
                are also reused for the positional encoding / MLP dropout.
        """
        super().__init__()
        self.num_pentanucleotide_contexts = num_pentanucleotide_contexts
        self.original_dim = dim
        self.depth = depth
        self.heads = heads
        self.dim_head = dim_head
        self.evo2_embedding_dim = evo2_embedding_dim
        # Kept for backward compatibility: fusion is additive, so the Performer
        # input dim equals the per-modality dim.
        self.performer_input_dim = self.original_dim
        self.dim = self.performer_input_dim

        # +2 reserved special ids (see Args note above).
        self.pentanucleotide_context_embedding = nn.Embedding(num_pentanucleotide_contexts + 2, self.original_dim)
        self.chromosome_embedding = nn.Embedding(num_chromosome_ids, self.original_dim)
        self.exon_strand_embedding = nn.Embedding(num_exon_strand_ids, self.original_dim)
        self.sequencing_method_embedding = nn.Embedding(num_sequencing_methods, self.original_dim)
        self.relative_chrom_pos_embedding = nn.Embedding(num_relative_chrom_positions, self.original_dim)

        self.use_absolute_genomic_pos_emb = use_absolute_genomic_pos_emb
        if self.use_absolute_genomic_pos_emb:
            self.absolute_genomic_pos_emb = AbsolutePositionalEncoding(
                d_embedding=self.original_dim,
                max_position=max_genomic_position,
                dropout=kwargs.get('attn_dropout', 0.0)
            )
        else:
            self.absolute_genomic_pos_emb = None

        mlp_hidden_dim = self.original_dim * mlp_hidden_dim_mult
        mlp_dropout = kwargs.get('ff_dropout', 0.1)

        def make_encoder(d_in: int, n_blocks: int) -> MLPBlock:
            # All per-modality encoders share the same hyper-parameters and
            # project into the fusion dimension; only input dim / depth vary.
            return MLPBlock(
                d_in=d_in, d_hidden=mlp_hidden_dim, d_out=self.original_dim,
                dropout=mlp_dropout, n_blocks=n_blocks, activation=mlp_activation,
                norm_type=mlp_norm_type, bias=mlp_bias
            )

        self.pentanucleotide_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)
        self.chromosome_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)
        self.exon_strand_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)
        self.sequencing_method_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)
        self.relative_chrom_pos_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)

        if self.use_absolute_genomic_pos_emb:
            self.absolute_genomic_pos_encoder = make_encoder(self.original_dim, mlp_n_blocks_continuous)
        else:
            self.absolute_genomic_pos_encoder = None

        self.use_evo2_embeddings = (evo2_embedding_dim > 0)
        if self.use_evo2_embeddings:
            self.evo2_encoder = make_encoder(evo2_embedding_dim, mlp_n_blocks_continuous)
        else:
            self.evo2_encoder = None

        # One learnable fusion weight per active modality (5 always-on
        # categorical modalities plus the optional ones).
        num_modalities_to_fuse = 5 + \
                                 (1 if self.use_evo2_embeddings else 0) + \
                                 (1 if self.use_absolute_genomic_pos_emb else 0)
        self.fusion_weights = nn.Parameter(torch.ones(num_modalities_to_fuse))
        self.fusion_norm = nn.LayerNorm(self.original_dim)

        self.cls_token = nn.Parameter(torch.randn(1, 1, self.original_dim))

        self.performer = Performer(
            dim=self.original_dim,
            depth=depth,
            heads=heads,
            dim_head=dim_head,
            **kwargs
        )
        self.pre_lm_head_norm = nn.LayerNorm(self.original_dim)
        # Output vocabulary matches the input embedding table (incl. the +2 specials).
        self.lm_head = nn.Linear(self.original_dim, num_pentanucleotide_contexts + 2)

    def forward(self, inputs) -> Dict[str, Any]:
        """
        Args:
            inputs: dict of batched tensors. Always required:
                'pentanucleotide_context_ids', 'chromosome_ids',
                'relative_chrom_pos_ids', 'exon_strand_ids',
                'sequencing_method_ids', 'attention_mask' — each of shape
                (batch, seq) — TODO confirm against the data pipeline.
                Required only when the modality is enabled:
                'original_positions' (absolute genomic positions),
                'ref_evo2_embeddings' (precomputed Evo2 embeddings).

        Returns:
            dict with:
                'logits': (batch, seq, num_pentanucleotide_contexts + 2)
                    per-position vocabulary logits,
                'sample_embedding': (batch, dim) CLS-token output.
        """
        pentanucleotide_context_ids = inputs['pentanucleotide_context_ids']
        chromosome_ids = inputs['chromosome_ids']
        relative_chrom_pos_ids = inputs['relative_chrom_pos_ids']
        exon_strand_ids = inputs['exon_strand_ids']
        sequencing_method_ids = inputs['sequencing_method_ids']
        attention_mask_from_inputs = inputs['attention_mask']

        batch_size = pentanucleotide_context_ids.size(0)

        # Embed + encode each always-on categorical modality.
        encoded_embeddings = [
            self.pentanucleotide_encoder(self.pentanucleotide_context_embedding(pentanucleotide_context_ids)),
            self.chromosome_encoder(self.chromosome_embedding(chromosome_ids)),
            self.exon_strand_encoder(self.exon_strand_embedding(exon_strand_ids)),
            self.sequencing_method_encoder(self.sequencing_method_embedding(sequencing_method_ids)),
            self.relative_chrom_pos_encoder(self.relative_chrom_pos_embedding(relative_chrom_pos_ids)),
        ]

        if self.use_absolute_genomic_pos_emb and self.absolute_genomic_pos_emb is not None:
            # Read the key only when the modality is enabled, so callers that
            # disable it need not supply 'original_positions'.
            raw_absolute_genomic_pos_emb = self.absolute_genomic_pos_emb(inputs['original_positions'])
            encoded_embeddings.append(self.absolute_genomic_pos_encoder(raw_absolute_genomic_pos_emb))
        if self.use_evo2_embeddings and self.evo2_encoder is not None:
            # Same: 'ref_evo2_embeddings' is only required when Evo2 is enabled.
            encoded_embeddings.append(self.evo2_encoder(inputs['ref_evo2_embeddings']))

        # Fuse modalities with learnable softmax-normalized weights.
        normalized_fusion_weights = F.softmax(self.fusion_weights, dim=-1)
        fused_embedding = sum(w * emb for w, emb in zip(normalized_fusion_weights, encoded_embeddings))
        x = self.fusion_norm(fused_embedding)

        # Prepend the CLS token and extend the attention mask to cover it.
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        cls_attention_mask = torch.ones(batch_size, 1, dtype=attention_mask_from_inputs.dtype, device=attention_mask_from_inputs.device)
        attention_mask_with_cls = torch.cat((cls_attention_mask, attention_mask_from_inputs), dim=1)

        performer_output = self.performer(x, mask=attention_mask_with_cls)

        # CLS-token output -> sample embedding; the remaining positions feed the LM head.
        sample_embedding = performer_output[:, 0, :]
        sequence_output = performer_output[:, 1:, :]

        normalized_sequence_output = self.pre_lm_head_norm(sequence_output)
        logits = self.lm_head(normalized_sequence_output)

        return {
            'logits': logits,
            'sample_embedding': sample_embedding
        }


class PerformerMutationClassifier(nn.Module):
    """
    Performer model for DNA mutation classification tasks (e.g. predicting
    tumour origin).

    Shares the feature pipeline of `PerformerMutationLM`: each mutation feature
    is embedded, encoded by a per-modality MLP, fused with learnable
    softmax-normalized weights, and run through a Performer encoder with a
    prepended CLS token. The CLS output is pooled as the sample embedding and
    passed through an MLP classification head for a sample-level prediction.
    """
    def __init__(self,
                num_pentanucleotide_contexts: int,
                num_chromosome_ids: int,
                num_exon_strand_ids: int,
                num_sequencing_methods: int,
                num_relative_chrom_positions: int,
                num_classes: int,
                dim: int = 64,
                depth: int = 3,
                heads: int = 4,
                dim_head: int = 16,
                evo2_embedding_dim: int = 0, # total dim of the combined Evo2 embedding; 0 disables the modality
                mlp_hidden_dim_mult: int = 2,
                mlp_n_blocks_discrete: int = 1,
                mlp_n_blocks_continuous: int = 3,
                mlp_activation: str = "gelu",
                mlp_norm_type: str = "rmsnorm",
                mlp_bias: bool = False,
                use_absolute_genomic_pos_emb: bool = True,
                max_genomic_position: int = 300_000_000,
                 **kwargs):
        """
        Args:
            num_pentanucleotide_contexts: pentanucleotide vocabulary size (two
                extra ids reserved — presumably MASK/PAD; TODO confirm).
            num_chromosome_ids / num_exon_strand_ids / num_sequencing_methods /
                num_relative_chrom_positions: categorical vocabulary sizes.
            num_classes: number of output classes for the classifier head.
            dim, depth, heads, dim_head: Performer hyper-parameters.
            evo2_embedding_dim: input dim of precomputed Evo2 embeddings;
                0 disables the Evo2 branch.
            mlp_*: per-modality MLP encoder hyper-parameters.
            use_absolute_genomic_pos_emb: enable the absolute genomic
                positional-encoding modality.
            max_genomic_position: upper bound for absolute genomic positions.
            **kwargs: forwarded to `Performer`; `attn_dropout` / `ff_dropout`
                are also reused for positional-encoding / MLP dropout.
        """
        super().__init__()

        self.num_pentanucleotide_contexts = num_pentanucleotide_contexts
        self.num_classes = num_classes
        self.original_dim = dim
        self.depth = depth
        self.heads = heads
        self.dim_head = dim_head
        self.evo2_embedding_dim = evo2_embedding_dim

        # Fusion is additive, so the Performer input dim equals the modality dim.
        self.performer_input_dim = self.original_dim
        self.dim = self.performer_input_dim

        # +2 reserved special ids (see Args note above).
        self.pentanucleotide_context_embedding = nn.Embedding(num_pentanucleotide_contexts + 2, self.original_dim)
        self.chromosome_embedding = nn.Embedding(num_chromosome_ids, self.original_dim)
        self.exon_strand_embedding = nn.Embedding(num_exon_strand_ids, self.original_dim)
        self.sequencing_method_embedding = nn.Embedding(num_sequencing_methods, self.original_dim)
        self.relative_chrom_pos_embedding = nn.Embedding(num_relative_chrom_positions, self.original_dim)

        self.use_absolute_genomic_pos_emb = use_absolute_genomic_pos_emb
        if self.use_absolute_genomic_pos_emb:
            self.absolute_genomic_pos_emb = AbsolutePositionalEncoding(
                d_embedding=self.original_dim,
                max_position=max_genomic_position,
                dropout=kwargs.get('attn_dropout', 0.0)
            )
        else:
            self.absolute_genomic_pos_emb = None

        mlp_hidden_dim = self.original_dim * mlp_hidden_dim_mult
        mlp_dropout = kwargs.get('ff_dropout', 0.1)

        def make_encoder(d_in: int, n_blocks: int) -> MLPBlock:
            # All per-modality encoders share hyper-parameters and project into
            # the fusion dimension; only input dim / depth vary.
            return MLPBlock(
                d_in=d_in, d_hidden=mlp_hidden_dim, d_out=self.original_dim,
                dropout=mlp_dropout, n_blocks=n_blocks, activation=mlp_activation,
                norm_type=mlp_norm_type, bias=mlp_bias
            )

        self.pentanucleotide_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)
        self.chromosome_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)
        self.exon_strand_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)
        self.sequencing_method_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)
        self.relative_chrom_pos_encoder = make_encoder(self.original_dim, mlp_n_blocks_discrete)

        if self.use_absolute_genomic_pos_emb:
            self.absolute_genomic_pos_encoder = make_encoder(self.original_dim, mlp_n_blocks_continuous)
        else:
            self.absolute_genomic_pos_encoder = None

        self.use_evo2_embeddings = (evo2_embedding_dim > 0)
        if self.use_evo2_embeddings:
            self.evo2_encoder = make_encoder(evo2_embedding_dim, mlp_n_blocks_continuous)
        else:
            self.evo2_encoder = None

        # One learnable fusion weight per active modality (5 always-on
        # categorical modalities plus the optional ones).
        num_modalities_to_fuse = 5 + \
                                 (1 if self.use_evo2_embeddings else 0) + \
                                 (1 if self.use_absolute_genomic_pos_emb else 0)
        self.fusion_weights = nn.Parameter(torch.ones(num_modalities_to_fuse))
        self.fusion_norm = nn.LayerNorm(self.original_dim)

        self.cls_token = nn.Parameter(torch.randn(1, 1, self.original_dim))

        self.performer = Performer(
            dim=self.original_dim,
            depth=depth,
            heads=heads,
            dim_head=dim_head,
            **kwargs
        )

        # Two-layer MLP head on the pooled CLS representation.
        self.classifier_head = nn.Sequential(
            nn.LayerNorm(self.original_dim),
            nn.GELU(),
            nn.Linear(self.original_dim, self.original_dim // 2),
            nn.GELU(),
            nn.Linear(self.original_dim // 2, num_classes)
        )

    def forward(self, inputs):
        """
        Args:
            inputs: dict of batched tensors. Always required:
                'pentanucleotide_context_ids', 'chromosome_ids',
                'relative_chrom_pos_ids', 'exon_strand_ids',
                'sequencing_method_ids', 'attention_mask' — each of shape
                (batch, seq) — TODO confirm against the data pipeline.
                Required only when the modality is enabled:
                'original_positions', 'ref_evo2_embeddings'.

        Returns:
            dict with:
                'logits': (batch, num_classes) class logits,
                'sample_embedding': (batch, dim) pooled CLS-token output.
        """
        pentanucleotide_context_ids = inputs['pentanucleotide_context_ids']
        chromosome_ids = inputs['chromosome_ids']
        relative_chrom_pos_ids = inputs['relative_chrom_pos_ids']
        exon_strand_ids = inputs['exon_strand_ids']
        sequencing_method_ids = inputs['sequencing_method_ids']
        attention_mask_from_inputs = inputs['attention_mask']

        batch_size = pentanucleotide_context_ids.size(0)

        # Embed + encode each always-on categorical modality.
        encoded_embeddings = [
            self.pentanucleotide_encoder(self.pentanucleotide_context_embedding(pentanucleotide_context_ids)),
            self.chromosome_encoder(self.chromosome_embedding(chromosome_ids)),
            self.exon_strand_encoder(self.exon_strand_embedding(exon_strand_ids)),
            self.sequencing_method_encoder(self.sequencing_method_embedding(sequencing_method_ids)),
            self.relative_chrom_pos_encoder(self.relative_chrom_pos_embedding(relative_chrom_pos_ids)),
        ]

        if self.use_absolute_genomic_pos_emb and self.absolute_genomic_pos_emb is not None:
            # Read the key only when the modality is enabled, so callers that
            # disable it need not supply 'original_positions'.
            raw_absolute_genomic_pos_emb = self.absolute_genomic_pos_emb(inputs['original_positions'])
            encoded_embeddings.append(self.absolute_genomic_pos_encoder(raw_absolute_genomic_pos_emb))

        if self.use_evo2_embeddings and self.evo2_encoder is not None:
            # Same: 'ref_evo2_embeddings' is only required when Evo2 is enabled.
            encoded_embeddings.append(self.evo2_encoder(inputs['ref_evo2_embeddings']))

        # Fuse modalities with learnable softmax-normalized weights.
        normalized_fusion_weights = F.softmax(self.fusion_weights, dim=-1)
        fused_embedding = sum(w * emb for w, emb in zip(normalized_fusion_weights, encoded_embeddings))
        x = self.fusion_norm(fused_embedding)

        # Prepend the CLS token and extend the attention mask to cover it.
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        cls_attention_mask = torch.ones(batch_size, 1, dtype=attention_mask_from_inputs.dtype, device=attention_mask_from_inputs.device)
        attention_mask_with_cls = torch.cat((cls_attention_mask, attention_mask_from_inputs), dim=1)

        performer_output = self.performer(x, mask=attention_mask_with_cls)

        # Pool the CLS-token output as the sample-level representation.
        pooled_output = performer_output[:, 0, :]

        logits = self.classifier_head(pooled_output)
        return {'logits': logits, 'sample_embedding': pooled_output}