# src/model.py

import torch
import torch.nn as nn
import pywt
# from mamba_ssm import Mamba  # required by MambaBlock below; restore once mamba_ssm is installed
class ResidualBlock(nn.Module):
    """1-D residual block: ``x + ReLU(BN(Conv1d(x)))``.

    The 3-wide convolution uses padding=1, so the temporal length is
    preserved and the skip connection is a plain elementwise addition.
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        # Channel count is unchanged so the identity shortcut lines up.
        self.conv = nn.Sequential(
            nn.Conv1d(channels, channels, kernel_size=3, padding=1),
            nn.BatchNorm1d(channels),
            nn.ReLU()
        )

    def forward(self, x):
        # Identity shortcut plus the learned residual branch.
        residual = self.conv(x)
        return x + residual

class Attention(nn.Module):
    """Additive attention pooling over the time axis.

    Scores each timestep with a small MLP, softmax-normalizes the scores
    over the sequence, and returns the weighted sum of the inputs.
    """

    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        # Two-layer scorer: hidden_dim -> hidden_dim -> scalar score per step.
        self.attention = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, 1)
        )

    def forward(self, x):
        """x: [batch, seq_len, hidden_dim] -> pooled [batch, hidden_dim]."""
        scores = self.attention(x)               # [batch, seq_len, 1]
        weights = torch.softmax(scores, dim=1)   # normalize over seq_len
        pooled = (x * weights).sum(dim=1)        # convex combination of steps
        return pooled
   
# Unidirectional Mamba wrapper.
# NOTE(review): `Mamba` is only in scope if the commented-out
# `from mamba_ssm import Mamba` import at the top of the file is restored;
# as written, constructing this class raises NameError.
class MambaBlock(nn.Module):
    """Pre-norm Mamba layer: LayerNorm -> Mamba -> Dropout.

    Note: the output is NOT added back to the input — there is no residual
    connection here (presumably intentional; confirm with the author).
    """

    def __init__(self, d_model, d_state=16, d_conv=4, expand=2):
        super().__init__()
        self.d_model = d_model  # feature width expected on the last axis

        # Official packaged Mamba implementation (see module-level import).
        self.mamba = Mamba(
            d_model=d_model,    # model (embedding) dimension
            d_state=d_state,    # SSM state size
            d_conv=d_conv,      # local convolution width
            expand=expand       # inner expansion factor
        )

        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(0.1)

    def forward(self, x):
        # x shape: [batch_size, seq_len, d_model]
        norm_x = self.norm(x)
        out = self.mamba(norm_x)
        out = self.dropout(out)
        return out

# Wavelet-transform feature extractor.
class WaveletTransform(nn.Module):
    """Multi-level DWT front-end.

    Decomposes each 1-D signal with pywt, runs a small CNN branch over every
    coefficient band, pools each band to a fixed length of 99, and fuses all
    bands with a final conv stack. Output: [batch, 256, 99].
    """

    def __init__(self, wavelet='db4', level=6):
        super(WaveletTransform, self).__init__()
        self.wavelet = wavelet  # pywt wavelet name
        self.level = level      # decomposition depth

        # One CNN branch per wavelet band.
        # NOTE(review): kernel_size=5 with padding=1 shrinks the band length
        # by 2 per conv; padding=2 ("same") may have been intended — confirm.
        self.wavelet_cnns = nn.ModuleList([
            nn.Sequential(
                nn.Conv1d(1, 16, kernel_size=5, padding=1),
                nn.BatchNorm1d(16),
                nn.ReLU(),
                nn.Conv1d(16, 32, kernel_size=5, padding=1),
                nn.BatchNorm1d(32),
                nn.ReLU(),
                ResidualBlock(32)
            ) for _ in range(level + 1)  # level+1 because the final approximation band is included
        ])

        # Fuses the concatenated band features into one 256-channel map.
        self.fusion_conv = nn.Sequential(
            nn.Conv1d(32 * (level + 1), 256, kernel_size=3, padding=1),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            ResidualBlock(256)
        )

    def forward(self, x):
        # x: [batch, 1, signal_len] — only channel 0 of each sample is decomposed.
        batch_size = x.shape[0]
        wavelet_features = []

        # Per-sample loop: pywt operates on numpy arrays, so each signal is
        # moved to CPU, decomposed, and its bands processed one at a time.
        # NOTE(review): this CPU round-trip is slow on GPU, and each CNN
        # branch sees batch size 1 (BatchNorm statistics computed per sample).
        for i in range(batch_size):
            coeffs = pywt.wavedec(x[i, 0].cpu().numpy(), self.wavelet, level=self.level)
            # Run the matching CNN branch over each coefficient band.
            level_features = []
            for j, coef in enumerate(coeffs):
                coef_tensor = torch.from_numpy(coef).float().to(x.device)
                # Add batch and channel dimensions: [1, 1, band_len].
                coef_tensor = coef_tensor.unsqueeze(0).unsqueeze(0)
                # CNN branch dedicated to this band.
                level_feature = self.wavelet_cnns[j](coef_tensor)
                # Pool every band to a common length of 99.
                level_feature = nn.functional.adaptive_avg_pool1d(level_feature, 99)
                level_features.append(level_feature)

            # Concatenate bands along channels: [1, 32*(level+1), 99].
            sample_features = torch.cat(level_features, dim=1)
            wavelet_features.append(sample_features)

        # Re-assemble the batch dimension.
        wavelet_features = torch.cat(wavelet_features, dim=0)
        # Fuse all bands into the final feature map.
        fused_features = self.fusion_conv(wavelet_features)
        return fused_features

class ECG_Autoencoder_Classifier_Lstm(nn.Module):
    """Wavelet features -> conv fusion -> 3x BiLSTM -> conv pooling -> attention -> binary logit."""

    def __init__(self, input_dim, hidden_dim, latent_dim, num_layers):
        super(ECG_Autoencoder_Classifier_Lstm, self).__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim

        # Wavelet front-end; emits [batch, 256, 99] feature maps.
        self.wavelet = WaveletTransform()

        # Projects the 256 wavelet channels down to hidden_dim.
        self.feature_fusion = nn.Sequential(
            nn.Conv1d(256, hidden_dim, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            ResidualBlock(hidden_dim)
        )

        # Three stacked bidirectional LSTMs; layers after the first consume
        # the 2*hidden_dim bidirectional output of the previous layer.
        self.lstm_layers = nn.ModuleList([
            nn.LSTM(
                input_size=hidden_dim if i == 0 else hidden_dim * 2,
                hidden_size=hidden_dim,
                batch_first=True,
                bidirectional=True
            )
            for i in range(3)
        ])

        # Conv refinement of the LSTM output, pooled to a single timestep.
        self.feature_enhancement = nn.Sequential(
            nn.Conv1d(hidden_dim * 2, hidden_dim, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            ResidualBlock(hidden_dim),
            nn.AdaptiveAvgPool1d(1)
        )

        # Attention pooling (the pooled sequence has length 1, so this is
        # effectively a squeeze over the time axis).
        self.attention = Attention(hidden_dim)

        # MLP head producing a single logit.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, 32),
            nn.ReLU(),
            nn.BatchNorm1d(32),
            nn.Linear(32, 1)
        )

    def forward(self, x):
        """x: [batch, 1, signal_len] -> logits [batch, 1]."""
        spectral = self.wavelet(x)                # [batch, 256, 99]
        fused = self.feature_fusion(spectral)     # [batch, hidden_dim, 99]

        # LSTMs expect [batch, seq_len, features].
        seq = fused.transpose(1, 2)
        for layer in self.lstm_layers:
            seq, _ = layer(seq)

        pooled = self.feature_enhancement(seq.transpose(1, 2))  # [batch, hidden_dim, 1]
        summary = self.attention(pooled.transpose(1, 2))        # [batch, hidden_dim]
        return self.classifier(summary).view(-1, 1)
    
class ECG_Autoencoder_Classifier_Transformer(nn.Module):
    """Wavelet features -> conv fusion -> Transformer encoder -> conv pooling -> attention -> binary logit.

    Note: hidden_dim must be divisible by the 4 attention heads.
    """

    def __init__(self, input_dim, hidden_dim, latent_dim, num_layers):
        super(ECG_Autoencoder_Classifier_Transformer, self).__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim

        # Wavelet front-end; emits [batch, 256, 99] feature maps.
        self.wavelet = WaveletTransform()

        # Projects the 256 wavelet channels down to hidden_dim.
        self.feature_fusion = nn.Sequential(
            nn.Conv1d(256, hidden_dim, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            ResidualBlock(hidden_dim)
        )

        # Transformer encoder configuration.
        # BUGFIX: forward() feeds tensors shaped [batch, seq_len, hidden_dim],
        # but TransformerEncoderLayer defaults to batch_first=False and treats
        # dim 0 as the sequence axis — self-attention was mixing information
        # across samples in the batch. batch_first=True makes attention run
        # over the time axis as intended.
        encoder_layers = nn.TransformerEncoderLayer(
            d_model=hidden_dim,              # feature dimension
            nhead=4,                         # attention heads (requires hidden_dim % 4 == 0)
            dim_feedforward=hidden_dim * 2,  # feed-forward width
            dropout=0.1,
            batch_first=True                 # inputs/outputs are [batch, seq, feat]
        )
        self.transformer_encoder = nn.TransformerEncoder(
            encoder_layers, num_layers=3  # three stacked encoder layers
        )

        # Conv refinement of the encoder output, pooled to a single timestep.
        self.feature_enhancement = nn.Sequential(
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            ResidualBlock(hidden_dim),
            nn.AdaptiveAvgPool1d(1)
        )

        # Attention pooling (length-1 sequence after pooling, effectively a squeeze).
        self.attention = Attention(hidden_dim)

        # MLP head producing a single logit.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, 32),
            nn.ReLU(),
            nn.BatchNorm1d(32),
            nn.Linear(32, 1)
        )

    def forward(self, x):
        """x: [batch, 1, signal_len] -> logits [batch, 1]."""
        # Wavelet feature extraction: [batch, 256, 99].
        wavelet_features = self.wavelet(x)

        # Channel fusion: [batch, hidden_dim, seq_len].
        fused_features = self.feature_fusion(wavelet_features)

        # [batch, hidden_dim, seq_len] -> [batch, seq_len, hidden_dim],
        # the layout the encoder expects with batch_first=True.
        transformer_input = fused_features.transpose(1, 2)

        # Self-attention over the time axis.
        transformer_out = self.transformer_encoder(transformer_input)

        # Back to [batch, hidden_dim, seq_len] for the conv stack.
        enhanced_features = self.feature_enhancement(transformer_out.transpose(1, 2))

        # Attention pooling over the (length-1) time axis.
        features = self.attention(enhanced_features.transpose(1, 2))

        # Single-logit classification.
        logits = self.classifier(features)
        return logits.view(-1, 1)
    
class ECG_Autoencoder_Classifier_Mamba(nn.Module):
    """Wavelet features -> conv fusion -> 3x MambaBlock -> conv pooling -> attention -> binary logit."""

    def __init__(self, input_dim, hidden_dim, latent_dim, num_layers):
        super(ECG_Autoencoder_Classifier_Mamba, self).__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim

        # Wavelet front-end; emits [batch, 256, 99] feature maps.
        self.wavelet = WaveletTransform()

        # Projects the 256 wavelet channels down to hidden_dim.
        # (No BatchNorm in this variant, unlike the sibling classifiers.)
        self.feature_fusion = nn.Sequential(
            nn.Conv1d(256, hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(),
            ResidualBlock(hidden_dim)
        )

        # Three stacked unidirectional Mamba blocks.
        self.mamba_layers = nn.ModuleList([
            MambaBlock(
                d_model=hidden_dim,
                d_state=16,
                d_conv=4,
                expand=2
            ) for _ in range(3)
        ])

        # Conv refinement pooled to a single timestep (again without BatchNorm).
        self.feature_enhancement = nn.Sequential(
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(),
            ResidualBlock(hidden_dim),
            nn.AdaptiveAvgPool1d(1)
        )

        # Attention pooling (length-1 sequence, effectively a squeeze).
        self.attention = Attention(hidden_dim)

        # MLP head producing a single logit.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, 32),
            nn.ReLU(),
            nn.BatchNorm1d(32),
            nn.Linear(32, 1)
        )

    def forward(self, x):
        """x: [batch, 1, signal_len] -> logits [batch, 1]."""
        spectral = self.wavelet(x)                # [batch, 256, 99]
        fused = self.feature_fusion(spectral)     # [batch, hidden_dim, 99]

        # Mamba blocks expect [batch, seq_len, features].
        seq = fused.transpose(1, 2)
        for block in self.mamba_layers:
            seq = block(seq)

        pooled = self.feature_enhancement(seq.transpose(1, 2))  # [batch, hidden_dim, 1]
        summary = self.attention(pooled.transpose(1, 2))        # [batch, hidden_dim]
        return self.classifier(summary).view(-1, 1)

class ECG_Autoencoder_Classifier_Cnn(nn.Module):
    """Wavelet features -> conv fusion -> 3x conv stage -> conv pooling -> attention -> binary logit."""

    def __init__(self, input_dim, hidden_dim, latent_dim, num_layers):
        super(ECG_Autoencoder_Classifier_Cnn, self).__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim

        # Wavelet front-end; emits [batch, 256, 99] feature maps.
        self.wavelet = WaveletTransform()

        # Projects the 256 wavelet channels down to hidden_dim.
        self.feature_fusion = nn.Sequential(
            nn.Conv1d(256, hidden_dim, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            ResidualBlock(hidden_dim)
        )

        # Three conv stages standing in for the recurrent stack of the LSTM
        # variant; the first stage widens hidden_dim -> 2*hidden_dim.
        self.cnn_layers = nn.ModuleList([
            nn.Sequential(
                nn.Conv1d(hidden_dim if i == 0 else hidden_dim * 2, hidden_dim * 2, kernel_size=3, padding=1),
                nn.BatchNorm1d(hidden_dim * 2),
                nn.ReLU(),
                ResidualBlock(hidden_dim * 2)
            ) for i in range(3)
        ])

        # Conv refinement back to hidden_dim, pooled to a single timestep.
        self.feature_enhancement = nn.Sequential(
            nn.Conv1d(hidden_dim * 2, hidden_dim, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            ResidualBlock(hidden_dim),
            nn.AdaptiveAvgPool1d(1)
        )

        # Attention pooling (length-1 sequence, effectively a squeeze).
        self.attention = Attention(hidden_dim)

        # MLP head producing a single logit.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, 32),
            nn.ReLU(),
            nn.BatchNorm1d(32),
            nn.Linear(32, 1)
        )

    def forward(self, x):
        """x: [batch, 1, signal_len] -> logits [batch, 1]."""
        spectral = self.wavelet(x)             # [batch, 256, 99]
        feats = self.feature_fusion(spectral)  # [batch, hidden_dim, 99]

        # Conv stages operate directly on [batch, channels, seq_len];
        # no transpose is needed, unlike the recurrent variants.
        for stage in self.cnn_layers:
            feats = stage(feats)

        pooled = self.feature_enhancement(feats)          # [batch, hidden_dim, 1]
        summary = self.attention(pooled.transpose(1, 2))  # [batch, hidden_dim]
        return self.classifier(summary).view(-1, 1)