import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging

class TransformerEncoder(nn.Module):
    """Linear projection + sinusoidal positional encoding + batch-first
    Transformer encoder stack, with a final LayerNorm.

    Args:
        input_dim: width of the raw input features.
        d_model: transformer model width.
        nhead: number of attention heads per layer.
        num_layers: number of stacked encoder layers.
        dim_feedforward: width of the per-layer feed-forward block.
        dropout: dropout rate used throughout.
    """

    def __init__(self, input_dim, d_model, nhead, num_layers, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        self.linear = nn.Linear(input_dim, d_model)
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers)
        self.norm = nn.LayerNorm(d_model)
        # Kept for state-dict compatibility; dropout after the positional
        # encoding is now applied inside self.pos_encoder.
        self.dropout = nn.Dropout(dropout)

    def forward(self, src):
        """
        Args:
            src: [batch_size, seq_len, input_dim]
        Returns:
            output: [batch_size, seq_len, d_model]
        """
        # Project raw features into the model dimension.
        src = self.linear(src)  # [batch_size, seq_len, d_model]

        # BUG FIX: the encoder layers are constructed with batch_first=True,
        # but the original code transposed to [seq_len, batch, d_model]
        # before calling the transformer, so self-attention mixed samples
        # across the batch axis instead of across time. Stay batch-first,
        # and let PositionalEncoding add the encoding + dropout instead of
        # indexing its `pe` buffer by hand.
        src = self.pos_encoder(src)  # [batch_size, seq_len, d_model]

        # Transformer encoding, batch-first throughout.
        output = self.transformer_encoder(src)  # [batch_size, seq_len, d_model]

        # Final layer normalization.
        output = self.norm(output)

        return output

class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., 2017) for
    batch-first inputs, followed by dropout.

    Args:
        d_model: embedding width; even indices get sin, odd get cos.
        dropout: dropout applied after the encoding is added.
        max_len: longest sequence the precomputed table supports.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the sinusoid table once. Registered as a buffer so it
        # follows the module across devices without being a parameter.
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        frequencies = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * frequencies)
        table[:, 1::2] = torch.cos(positions * frequencies)
        self.register_buffer('pe', table.unsqueeze(0))  # [1, max_len, d_model]

    def forward(self, x):
        """
        Args:
            x: [batch_size, seq_len, d_model]
        Returns:
            x plus its positional encoding, with dropout applied.
        """
        seq_len = x.size(1)
        # Fail loudly instead of silently truncating over-long sequences.
        if seq_len > self.pe.size(1):
            raise ValueError(f"Sequence length {seq_len} exceeds maximum length {self.pe.size(1)}")

        # Slice [1, seq_len, d_model] and expand across the batch axis.
        encoding = self.pe[:, :seq_len].expand(x.size(0), -1, -1)
        return self.dropout(x + encoding)

class CNNFeatureExtractor(nn.Module):
    """1-D CNN that pools a [batch, channels, length] signal into a
    fixed-size [batch, output_dim] feature vector.

    Args:
        input_channels: channels per position (e.g. 4 for one-hot DNA).
        output_dim: width of the per-sample output feature vector.
    """

    def __init__(self, input_channels=4, output_dim=161):
        super().__init__()
        self.conv_layers = nn.Sequential(
            # Block 1: 32 filters, halve the length.
            nn.Conv1d(input_channels, 32, kernel_size=3, padding=1),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.MaxPool1d(2),

            # Block 2: 64 filters, halve the length again.
            nn.Conv1d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.MaxPool1d(2),

            # Block 3: 128 filters, halve the length again.
            nn.Conv1d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.MaxPool1d(2),

            # Block 4: BUG FIX — the original hard-coded 161 channels here,
            # silently ignoring the output_dim argument. Use the parameter
            # (the default of 161 preserves the old behaviour).
            nn.Conv1d(128, output_dim, kernel_size=3, padding=1),
            nn.BatchNorm1d(output_dim),
            nn.ReLU(),
            # Global average pool collapses the length axis to 1.
            nn.AdaptiveAvgPool1d(1)
        )

    def forward(self, x):
        """
        Args:
            x: [batch, input_channels, length], or [input_channels, length]
               (a missing batch axis is added).
        Returns:
            [batch, output_dim]
        """
        # Promote an unbatched [channels, length] input to batch size 1.
        if x.dim() == 2:
            x = x.unsqueeze(0)

        x = self.conv_layers(x)  # [batch, output_dim, 1]

        # Drop the pooled length axis -> [batch, output_dim].
        x = x.squeeze(-1)

        # Defensive: restore the batch axis if squeezing collapsed it.
        if x.dim() == 1:
            x = x.unsqueeze(0)

        return x

class ExpertLayer(nn.Module):
    """Two-layer feed-forward expert: expand to twice the input width with
    a ReLU, then project down to output_size."""

    def __init__(self, input_size, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, input_size * 2)
        self.fc2 = nn.Linear(input_size * 2, output_size)

    def forward(self, x):
        hidden = self.fc1(x)
        hidden = F.relu(hidden)
        return self.fc2(hidden)

class MixtureOfExperts(nn.Module):
    """Dense mixture-of-experts: every expert processes every input, and
    the expert outputs are blended with softmax gate weights.

    Args:
        num_experts: number of parallel ExpertLayer instances.
        input_size: width of the incoming feature vector.
        output_size: width of each expert's (and the blended) output.
    """

    def __init__(self, num_experts, input_size, output_size):
        super().__init__()
        self.num_experts = num_experts
        self.experts = nn.ModuleList(
            ExpertLayer(input_size, output_size) for _ in range(num_experts)
        )
        self.gate = nn.Linear(input_size, num_experts)

    def forward(self, x):
        # Gate weights: [batch, num_experts], each row sums to 1.
        weights = F.softmax(self.gate(x), dim=-1)

        # Run every expert; stack directly on axis 1 instead of stacking on
        # axis 0 and transposing: [batch, num_experts, output_size].
        outputs = torch.stack([expert(x) for expert in self.experts], dim=1)

        # Weighted sum over the expert axis: [batch, output_size].
        return torch.sum(weights.unsqueeze(-1) * outputs, dim=1)

class ChromatinModel(nn.Module):
    """Chromatin-state classifier combining a CNN over one-hot DNA
    sequence, resampled histone-mark features, a transformer encoder, and
    a mixture-of-experts head.

    Args:
        num_classes: number of chromatin states to predict.
        d_model: transformer model width.
        num_heads: attention heads per transformer layer.
        num_layers: number of transformer encoder layers.
    """

    def __init__(self, num_classes=7, d_model=256, num_heads=8, num_layers=6):
        super().__init__()

        # CNN feature extractor: 4-channel sequence -> 161-dim vector.
        self.cnn = CNNFeatureExtractor(input_channels=4, output_dim=161)

        # Fuse concatenated sequence + histone features into d_model.
        self.feature_transform = nn.Sequential(
            nn.Linear(322, d_model),  # 161 (CNN) + 161 (histone) = 322
            nn.LayerNorm(d_model),
            nn.ReLU(),
            nn.Dropout(0.1)
        )

        # Transformer encoder (batch-first, GELU activations).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=num_heads,
            dim_feedforward=d_model * 4,
            dropout=0.1,
            activation='gelu',
            batch_first=True
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # Mixture-of-experts projection: d_model -> d_model // 2.
        self.moe = MixtureOfExperts(
            num_experts=8,
            input_size=d_model,
            output_size=d_model // 2
        )

        # Final MLP classifier.
        self.classifier = nn.Sequential(
            nn.Linear(d_model // 2, d_model // 4),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(d_model // 4, num_classes)
        )

    def forward(self, sequence_features, histone_features):
        """
        Args:
            sequence_features: [batch, 4, length] one-hot sequence
                (an unbatched [4, length] tensor is promoted to batch 1).
            histone_features: [batch, n] histone signal; linearly
                resampled to length 161 regardless of n. An unbatched
                1-D vector is promoted to batch 1.
        Returns:
            [batch, num_classes] unnormalised class scores.
        """
        # Promote an unbatched [channels, length] sequence to batch size 1.
        if sequence_features.dim() == 2:
            sequence_features = sequence_features.unsqueeze(0)
        # BUG FIX: the original promoted 2-D histone input to 3-D, after
        # which the unsqueeze(1) below produced a 4-D tensor that
        # F.interpolate(mode='linear') rejects (it accepts only 3-D input).
        # The expected batched shape is 2-D [batch, n]; promote only an
        # unbatched 1-D vector.
        if histone_features.dim() == 1:
            histone_features = histone_features.unsqueeze(0)

        # FIX: use debug-level logging instead of print() so shape tracing
        # can be switched off outside of debugging sessions.
        logger = logging.getLogger(__name__)
        logger.debug("Sequence features shape: %s", sequence_features.shape)
        logger.debug("Histone features shape: %s", histone_features.shape)

        # CNN over the sequence -> [batch, 161].
        seq_features = self.cnn(sequence_features)

        # Linearly resample the histone track to 161 values per sample:
        # [batch, n] -> [batch, 1, n] -> [batch, 1, 161] -> [batch, 161].
        histone_features = F.interpolate(
            histone_features.unsqueeze(1),
            size=161,
            mode='linear'
        ).squeeze(1)

        # Concatenate the two 161-dim views -> [batch, 322].
        combined_features = torch.cat([seq_features, histone_features], dim=1)

        # Project into the transformer width.
        transformed_features = self.feature_transform(combined_features)

        # Treat each sample as a length-1 sequence for the transformer.
        transformer_out = self.transformer(transformed_features.unsqueeze(1)).squeeze(1)

        # Mixture-of-experts head, then classification.
        moe_out = self.moe(transformer_out)
        return self.classifier(moe_out)

class TMBModel(nn.Module):
    def __init__(self, input_dim=322, hidden_dim=256, num_classes=9, dropout=0.1):
        """Lightweight TMB model: a two-layer feature extractor followed by
        a small classifier head, with optional gene-expression fusion.

        Args:
            input_dim: input feature width (set dynamically from the data).
            hidden_dim: hidden layer width.
            num_classes: number of classes (9 chromatin states).
            dropout: dropout rate.
        """
        super().__init__()

        # Log the dimensions this instance was built with.
        logger = logging.getLogger(__name__)
        logger.info(f"TMBModel初始化，使用输入维度: {input_dim}")
        logger.info(f"TMBModel初始化，分类数量: {num_classes}")

        # Feature extractor: input_dim -> hidden_dim -> hidden_dim // 2.
        self.feature_extractor = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, hidden_dim // 2)
        )

        # BUG FIX: the original forward concatenated the scalar expression
        # value onto the extracted features, producing width
        # hidden_dim // 2 + 1, which crashed the classifier's
        # LayerNorm(hidden_dim // 2). Project the scalar into the feature
        # space instead and fuse it additively in forward().
        self.expression_proj = nn.Linear(1, hidden_dim // 2)

        # Classifier head: hidden_dim // 2 -> num_classes.
        self.classifier = nn.Sequential(
            nn.LayerNorm(hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim // 2, num_classes)
        )

        # Initialize weights.
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Xavier-uniform Linear weights, zero biases; identity LayerNorm."""
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)

    def forward(self, features, expression=None):
        """Forward pass.

        Args:
            features: [batch_size, input_dim] combined features.
            expression: [batch_size] optional gene-expression values.
        Returns:
            [batch_size, num_classes] unnormalised class scores.
        """
        logger = logging.getLogger(__name__)
        logger.debug(f"TMBModel forward 接收到的特征形状: {features.shape}")

        # Extract features: [batch, hidden_dim // 2].
        x = self.feature_extractor(features)

        # Fuse the optional expression scalar additively (see __init__ for
        # why this replaced the original concatenation).
        if expression is not None:
            x = x + self.expression_proj(expression.unsqueeze(1))

        # Classify.
        return self.classifier(x)

class PolicyNetwork(nn.Module):
    """MLP policy head mapping a state vector to a probability
    distribution over actions (softmax output).

    Args:
        input_dim: state vector width.
        hidden_dim: first hidden layer width (second is half of it).
        action_dim: number of actions.
        dropout: dropout rate between layers.
    """

    def __init__(self, input_dim, hidden_dim, action_dim, dropout=0.1):
        super().__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.LayerNorm(hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim // 2, action_dim),
            nn.Softmax(dim=-1),
        ]
        self.net = nn.Sequential(*layers)

        # Apply the custom initialization to every submodule.
        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Linear: Xavier-uniform weights, zero bias.
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        # LayerNorm: identity scale, zero shift.
        elif isinstance(module, nn.LayerNorm):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)

    def forward(self, state):
        """Return action probabilities for the given state batch."""
        return self.net(state)