import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class AttentionModule(nn.Module):
    """Attention pooling over LSTM outputs, queried by the last timestep.

    Luong-style scoring schemes:
      - "dot":     score(h_t, q) = h_t . q
      - "general": score(h_t, q) = (W h_t) . q
      - "concat":  score(h_t, q) = v . tanh(W [h_t; q])

    Args:
        hidden_size: feature size of each timestep in the LSTM output.
        attention_type: one of "dot", "general", "concat".
    """

    def __init__(self, hidden_size, attention_type="dot"):
        super(AttentionModule, self).__init__()
        self.hidden_size = hidden_size
        self.attention_type = attention_type

        # "dot" needs no parameters; the other two schemes learn a projection.
        if attention_type == "general":
            self.attention_weights = nn.Linear(hidden_size, hidden_size, bias=False)
        elif attention_type == "concat":
            self.attention_weights = nn.Linear(hidden_size * 2, hidden_size, bias=False)
            self.v = nn.Parameter(torch.rand(hidden_size))

    def forward(self, lstm_output):
        """Pool a sequence into one context vector via attention.

        Args:
            lstm_output: tensor of shape (batch, seq_len, hidden_size).

        Returns:
            (context_vector, attention_weights) with shapes
            (batch, hidden_size) and (batch, seq_len).
        """
        seq_len = lstm_output.size(1)
        query = lstm_output[:, -1:]  # last timestep, (batch, 1, hidden)

        if self.attention_type == "dot":
            scores = (lstm_output * query).sum(dim=-1)
        elif self.attention_type == "general":
            projected = self.attention_weights(lstm_output)
            scores = (projected * query).sum(dim=-1)
        else:  # "concat"
            repeated_query = query.expand(-1, seq_len, -1)
            paired = torch.cat([lstm_output, repeated_query], dim=-1)
            energy = torch.tanh(self.attention_weights(paired))
            scores = (self.v * energy).sum(dim=-1)

        # Normalize scores across the time axis.
        weights = F.softmax(scores, dim=1)

        # Weighted sum of timesteps -> single context vector per sample.
        context = (lstm_output * weights.unsqueeze(-1)).sum(dim=1)

        return context, weights


class CNN_LSTM_Attention(nn.Module):
    """CNN-LSTM-Attention model for ECG signal classification.

    Pipeline: a 1-D CNN extracts local waveform features, a bidirectional
    LSTM models temporal dependencies, and an attention module pools the
    sequence into a single context vector that a small MLP classifies.

    Args:
        input_channels: channels of the input signal (1 for single-lead ECG).
        seq_length: length of the input signal; only used to precompute
            ``cnn_output_length``.
        hidden_size: LSTM hidden size per direction.
        num_layers: number of stacked LSTM layers.
        num_classes: number of output classes.
        dropout: dropout probability used throughout.
    """

    def __init__(self, input_channels=1, seq_length=268, hidden_size=128,
                 num_layers=2, num_classes=4, dropout=0.3):
        super(CNN_LSTM_Attention, self).__init__()

        self.input_channels = input_channels
        self.seq_length = seq_length
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_classes = num_classes

        # CNN front-end: extracts local features. Every Conv1d is stride=1
        # with "same"-style padding, so only the pools change the length.
        self.cnn = nn.Sequential(
            # Stage 1
            nn.Conv1d(input_channels, 64, kernel_size=7, stride=1, padding=3),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2),
            nn.Dropout(dropout),

            # Stage 2
            nn.Conv1d(64, 128, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2),
            nn.Dropout(dropout),

            # Stage 3
            nn.Conv1d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2),
            nn.Dropout(dropout)
        )

        # Sequence length after the CNN (informational; LSTM is length-agnostic).
        self.cnn_output_length = self._get_cnn_output_length(seq_length)

        # Bidirectional LSTM captures temporal dependencies.
        self.lstm = nn.LSTM(
            input_size=256,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,
            dropout=dropout if num_layers > 1 else 0  # inter-layer dropout needs >1 layer
        )

        # Attention over the LSTM outputs; bidirectional => feature size 2*hidden.
        self.attention = AttentionModule(hidden_size * 2)

        # Classification head.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_size * 2, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(512, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(128, num_classes)
        )

    def _get_cnn_output_length(self, length):
        """Return the sequence length after the three CNN stages.

        The convolutions keep the length (stride=1, symmetric padding), so
        only the three MaxPool1d(kernel_size=2, stride=2, padding=0) layers
        shrink it: L_out = (L_in - 2) // 2 + 1 == L_in // 2 per pool.
        (The previous formula assumed padding=1 on the pools and therefore
        overestimated the length, e.g. 35 instead of 33 for length 268.)
        """
        for _ in range(3):
            length = length // 2
        return length

    def forward(self, x):
        """Classify a batch of signals.

        Args:
            x: tensor of shape (batch_size, input_channels, seq_length).

        Returns:
            Logits of shape (batch_size, num_classes).
        """
        batch_size = x.size(0)

        # Local feature extraction: (batch, 256, cnn_output_length).
        cnn_features = self.cnn(x)

        # LSTM wants (batch, time, features).
        cnn_features = cnn_features.permute(0, 2, 1)

        # Temporal modeling: (batch, cnn_output_length, hidden_size*2).
        lstm_out, (h_n, c_n) = self.lstm(cnn_features)

        # Attention-pool the sequence into one vector per sample.
        context_vector, attention_weights = self.attention(lstm_out)

        # Classify the context vector.
        output = self.classifier(context_vector)

        return output


class MultiHeadAttentionVariant(nn.Module):
    """CNN-LSTM model using PyTorch's built-in MultiheadAttention.

    Same overall idea as the custom-attention model, but self-attention is
    applied across all timesteps and the result is mean-pooled over time.

    Args:
        input_channels: channels of the input signal.
        seq_length: nominal input length (not used for layer sizing).
        hidden_size: LSTM hidden size per direction.
        num_heads: number of attention heads (must divide 2*hidden_size).
        num_classes: number of output classes.
        dropout: dropout probability used throughout.
    """

    def __init__(self, input_channels=1, seq_length=268, hidden_size=128,
                 num_heads=4, num_classes=4, dropout=0.3):
        super(MultiHeadAttentionVariant, self).__init__()

        # Convolutional front-end: two conv/pool stages.
        cnn_layers = [
            nn.Conv1d(input_channels, 64, kernel_size=15, stride=2, padding=7),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3, stride=2, padding=1),
            nn.Dropout(dropout),

            nn.Conv1d(64, 128, kernel_size=7, stride=1, padding=3),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3, stride=2, padding=1),
            nn.Dropout(dropout),
        ]
        self.cnn = nn.Sequential(*cnn_layers)

        # Single bidirectional LSTM layer over the CNN features.
        self.lstm = nn.LSTM(128, hidden_size, batch_first=True, bidirectional=True)

        # Self-attention across timesteps; embed dim is 2*hidden (bidirectional).
        self.multihead_attn = nn.MultiheadAttention(
            embed_dim=hidden_size * 2,
            num_heads=num_heads,
            batch_first=True,
            dropout=dropout
        )

        # Classification head.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_size * 2, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(256, num_classes)
        )

    def forward(self, x):
        """Run the model.

        Args:
            x: tensor of shape (batch, input_channels, seq_length).

        Returns:
            (logits, attention_weights) where logits has shape
            (batch, num_classes) and the weights come from the
            multi-head self-attention layer.
        """
        # (batch, 128, T) -> (batch, T, 128) for the LSTM.
        features = self.cnn(x).transpose(1, 2)

        # (batch, T, 2*hidden)
        sequence, _ = self.lstm(features)

        # Self-attention: queries, keys, and values are all the LSTM output.
        attended, attn_weights = self.multihead_attn(sequence, sequence, sequence)

        # Global average pooling over the time axis.
        pooled = attended.mean(dim=1)

        return self.classifier(pooled), attn_weights


# 模型工具函数
def create_model(model_type="cnn_lstm_attention", **kwargs):
    """Factory: build a model by name.

    Args:
        model_type: "cnn_lstm_attention" or "multihead_attention".
        **kwargs: forwarded to the chosen model's constructor.

    Raises:
        ValueError: if ``model_type`` is not recognized.
    """
    if model_type == "multihead_attention":
        return MultiHeadAttentionVariant(**kwargs)
    if model_type == "cnn_lstm_attention":
        return CNN_LSTM_Attention(**kwargs)
    raise ValueError(f"未知的模型类型: {model_type}")


def count_parameters(model):
    """Return the total count of trainable scalar parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:  # skip frozen parameters
            total += param.numel()
    return total
