import math
import torch
import torch.nn as nn
import config

# Model architectures for sequence data (LSTM+CNN, Transformer, GRU, LSTM+attention).
# Default compute device for this module.
# NOTE(review): GRU/ARIMA move their initial hidden states to this global rather
# than the input's device — prefer `x.device` inside `forward`.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class LSTMCNN(nn.Module):
    """LSTM encoder followed by a 1D-CNN feature extractor and an MLP head.

    Input:  (batch, seq_len, input_size), where seq_len must equal
            ``config.TIME_STEPS`` (the fully-connected layer size depends on it).
    Output: (batch, output_size).
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Sequence encoder
        self.lstm = nn.LSTM(input_size, hidden_size,
                            num_layers, batch_first=True)

        # Convolutional feature extractor applied along the time axis
        self.conv1 = nn.Conv1d(hidden_size, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv1d(64, 128, kernel_size=3, padding=1)

        # MLP head; the two max-pools each halve the time axis, hence // 2 // 2
        self.fc1 = nn.Linear(128 * (config.TIME_STEPS // 2 // 2), 100)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(100, output_size)

    def forward(self, x):
        batch = x.size(0)

        # Explicit zero initial hidden/cell states on the input's device
        state_shape = (self.num_layers, batch, self.hidden_size)
        h0 = torch.zeros(*state_shape).to(x.device)
        c0 = torch.zeros(*state_shape).to(x.device)

        # (batch, seq_len, hidden_size)
        seq_features, _ = self.lstm(x, (h0, c0))

        # Conv1d expects channels first: (batch, hidden_size, seq_len)
        feat = seq_features.permute(0, 2, 1)
        feat = self.pool(torch.relu(self.conv1(feat)))
        feat = self.pool(torch.relu(self.conv2(feat)))

        # Flatten the pooled feature maps and classify
        flat = feat.reshape(batch, -1)
        hidden = self.dropout(torch.relu(self.fc1(flat)))
        return self.fc2(hidden)


class TransformerPositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes a (1, max_seq_len, d_model) table of interleaved sin/cos
    waves and adds the relevant prefix to the input on every forward pass.
    Assumes ``d_model`` is even (sin fills even columns, cos the odd ones).
    """

    def __init__(self, d_model, max_seq_len=100):
        super().__init__()
        positions = torch.arange(0, max_seq_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency progression: 10000^(-2i/d_model) for i = 0..d_model/2-1
        inv_freq = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))

        table = torch.zeros(max_seq_len, d_model)
        table[:, 0::2] = torch.sin(positions * inv_freq)
        table[:, 1::2] = torch.cos(positions * inv_freq)

        # Registered as a buffer so it follows the module across devices
        # and is saved in the state dict, but is not a trainable parameter.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        """Add the encoding to x of shape (batch_size, seq_len, d_model)."""
        return x + self.pe[:, :x.size(1), :]


class Transformer(nn.Module):
    """Transformer-encoder model with mean pooling over the sequence.

    Input:  (batch, seq_len, input_size)
    Output: (batch, output_size)
    ``hidden_size`` must be divisible by ``nhead``.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size,
                 nhead=4, dropout=0.1):
        super().__init__()

        # Project raw features into the model dimension
        self.embedding = nn.Linear(input_size, hidden_size)

        self.pos_encoder = TransformerPositionalEncoding(hidden_size)

        layer = nn.TransformerEncoderLayer(
            d_model=hidden_size,
            nhead=nhead,
            dim_feedforward=hidden_size * 2,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = nn.TransformerEncoder(
            layer, num_layers=num_layers)

        self.fc = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        embedded = self.pos_encoder(self.embedding(x))
        encoded = self.transformer_encoder(embedded)

        # Collapse the time axis by averaging token representations
        pooled = encoded.mean(dim=1)

        return self.fc(self.dropout(pooled))


class GRU(nn.Module):
    """Stacked GRU with a two-layer MLP head on the last time step.

    Args:
        input_size: number of features per time step.
        hidden_size: GRU hidden dimension.
        num_layers: number of stacked GRU layers.
        output_size: dimension of the final output.

    Input:  (batch, seq_len, input_size)
    Output: (batch, output_size)
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.gru = nn.GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            # inter-layer dropout is only meaningful with more than one layer
            dropout=0.2 if num_layers > 1 else 0
        )

        self.fc1 = nn.Linear(hidden_size, 128)
        self.dropout = nn.Dropout(0.3)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(128, output_size)

    def forward(self, x):
        # Fix: allocate the initial hidden state on the input's device instead
        # of the module-level global `device`, so the model keeps working when
        # the input/model live on a different device (e.g. a specific GPU).
        h0 = torch.zeros(self.num_layers, x.size(0),
                         self.hidden_size, device=x.device)

        # out shape: (batch_size, seq_len, hidden_size)
        out, _ = self.gru(x, h0)

        # keep only the last time step's representation
        out = out[:, -1, :]

        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)

        return out


class ARIMA(nn.Module):
    """LSTM + multi-head self-attention model.

    NOTE(review): despite the name, this is not a statistical ARIMA model —
    it is an LSTM with a self-attention head. The name is kept for backward
    compatibility with existing callers; consider renaming.

    Args:
        input_size: number of features per time step.
        hidden_size: LSTM hidden dimension; must be divisible by 4
            (the attention head count).
        num_layers: number of stacked LSTM layers.
        output_size: dimension of the final output.

    Input:  (batch, seq_len, input_size)
    Output: (batch, output_size)
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(ARIMA, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # LSTM over the input sequence.
        # Fix: only enable inter-layer dropout when there is more than one
        # layer (PyTorch warns otherwise, and it is a no-op for one layer);
        # this matches the guard used by the GRU model in this file.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True,
                            dropout=0.2 if num_layers > 1 else 0)

        # Self-attention over the LSTM outputs
        self.attention = nn.MultiheadAttention(hidden_size, num_heads=4)

        # Classification head
        self.fc1 = nn.Linear(hidden_size, 32)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)
        self.fc2 = nn.Linear(32, output_size)

    def forward(self, x):
        # Fix: create the initial states on the input's device rather than
        # the module-level global `device`, so the model works wherever the
        # input/model actually live.
        h0 = torch.zeros(self.num_layers, x.size(0),
                         self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0),
                         self.hidden_size, device=x.device)

        # LSTM forward pass
        lstm_out, _ = self.lstm(x, (h0, c0))

        # Last time step of the raw LSTM output
        last_output = lstm_out[:, -1, :]

        # nn.MultiheadAttention defaults to (seq_len, batch, embed) layout,
        # hence the transpose before self-attending.
        attn_input = lstm_out.transpose(0, 1)
        attn_output, _ = self.attention(attn_input, attn_input, attn_input)
        attn_output = attn_output[-1]  # last time step of attended sequence

        # Residual-style combination of the plain and attended views
        combined = last_output + attn_output

        # Fully-connected head
        x = self.fc1(combined)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)

        return x
