# itransformer_lstm.py - model architecture module
import torch
import torch.nn as nn

import math

from attention import Attention
from cross_attention import CrossAttention
from feed_forward import FeedForward
from kan import KAN
from kan_linear import KANLinear

class ItransformerLstm(nn.Module):
    """iTransformer + LSTM hybrid forecasting model.

    The first input feature is projected along the time axis into an
    embedding and refined by a stack of attention / feed-forward layers
    (the iTransformer branch); the remaining features are encoded by an
    LSTM. The two branches are fused with cross attention and a KAN head
    maps the result to the prediction horizon.

    Args:
        input_size: total number of input features; feature 0 feeds the
            attention branch, features 1..input_size-1 feed the LSTM.
        length_input: length of the input time window.
        dim_embed: embedding width of the attention branch.
        dim_lstm: hidden size of the LSTM branch.
        depth: number of attention/feed-forward blocks.
        heads: number of attention heads.
        depth_lstm: number of stacked LSTM layers.
        length_pre: number of predicted steps (output length).
    """
    def __init__(self, input_size=5, length_input=24, dim_embed=32, dim_lstm=32,
                 depth=3, heads=12, depth_lstm=3, length_pre=1):
        super().__init__()

        # Attention branch: `depth` blocks of (attention, norm, FFN, norm),
        # applied as post-norm residual sub-layers in forward().
        self.model1 = nn.ModuleList(
            nn.ModuleList([
                Attention(dim_embed, heads=heads),
                nn.LayerNorm(dim_embed),
                FeedForward(dim_embed),
                nn.LayerNorm(dim_embed),
            ])
            for _ in range(depth)
        )

        # LSTM branch consumes every feature except the first one.
        self.lstm = nn.LSTM(input_size=input_size - 1,
                            hidden_size=dim_lstm,
                            num_layers=depth_lstm,
                            batch_first=True,
                            bidirectional=False)

        # Projects the whole time series of feature 0 into the embedding
        # space (inverted embedding, as in iTransformer).
        self.mlp_in = nn.Sequential(
            nn.Linear(length_input, dim_embed),
            nn.LayerNorm(dim_embed),
        )

        # Fusion of the two branches, then projection to the horizon.
        self.cross = CrossAttention(dim=dim_embed, lenth=dim_lstm)
        self.k_mpl = KAN([dim_embed, length_pre])

    def forward(self, x):
        """Run both branches and return the fused prediction.

        Args:
            x: input tensor; assumed shape (batch, length_input, input_size)
               given the slicing below — TODO confirm with callers.

        Returns:
            Tensor of shape (batch, length_pre).
        """
        # Branch 1: feature 0 -> (batch, 1, length_input) -> embedding.
        target_series = x[:, :, 0]
        branch_attn = self.mlp_in(target_series.unsqueeze(-1).transpose(1, 2))

        # Branch 2: remaining features through the LSTM (hidden state unused).
        branch_lstm, _ = self.lstm(x[:, :, 1:])

        # Post-norm residual blocks: x = norm(sublayer(x) + x).
        for attention, norm_attn, ffn, norm_ffn in self.model1:
            branch_attn = norm_attn(attention(branch_attn) + branch_attn)
            branch_attn = norm_ffn(ffn(branch_attn) + branch_attn)

        # Cross-attend the transformer tokens over the LSTM states.
        fused = self.cross(branch_attn, branch_lstm)
        prediction = self.k_mpl(fused)
        # Keep only the first token's projection as the model output.
        return prediction[:, 0, :]
