import sys

import numpy as np
import torch
import torch.nn as nn
from orbitP.script import config
from orbitP.layers.FeatureEncoder import FeatureEncoder

import torch
import torch.nn as nn

class Lstm(nn.Module):
    """Seq2seq LSTM forecaster.

    An LSTM encoder summarizes the history window; its final hidden/cell
    state initializes an LSTM decoder that unrolls over a zero placeholder
    input for ``forecast_window`` steps. A small MLP head maps decoder
    states to the output dimension.
    """

    def __init__(self, args):
        super().__init__()
        self.hidden_dim = args.d_model
        self.num_layers = args.num_layers
        # Encoder input width depends on whether the learned feature head is
        # used: the head drops one raw feature and appends its embedding.
        if config.useHead:
            self.feature_size = args.feature_size - 1 + args.embed_dim + config.stampSize
        else:
            self.feature_size = args.feature_size + config.stampSize
        # With an NLL loss the model emits two values (e.g. mean and scale)
        # per target, doubling the output width.
        if config.nll:
            self.output_dim = 2 * config.outputSize
        else:
            self.output_dim = config.outputSize
        self.forecast_window = config.forecast_window

        # ---------- Encoder ----------
        self.encoder = nn.LSTM(
            input_size=self.feature_size,
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            batch_first=True,
            # nn.LSTM warns if dropout is set with a single layer.
            dropout=args.dropout if args.num_layers > 1 else 0.0,
            bidirectional=False,
        )

        # ---------- Decoder ----------
        self.decoder = nn.LSTM(
            input_size=config.outputSize,  # fed a zero placeholder sequence in forward()
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            batch_first=True,
            dropout=args.dropout if args.num_layers > 1 else 0.0,
            bidirectional=False,
        )

        # ---------- Output head ----------
        self.fc = nn.Sequential(
            nn.Linear(self.hidden_dim, self.hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(self.hidden_dim // 2, self.output_dim),
        )
        self.ln = nn.LayerNorm(self.hidden_dim)
        if config.useHead:
            self.FeatureEncoder = FeatureEncoder(args)

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        """
        Args:
            x_enc:      [B, enc_len, D] encoder features.
            x_mark_enc: [B, enc_len, stampSize] encoder time-stamp features.
            x_dec:      [B, dec_len, D] decoder features; only columns
                        ``config.axis:-1`` are used.
            x_mark_dec: [B, dec_len, stampSize] decoder time-stamp features.

        Returns:
            [B, forecast_window, output_dim] predictions.
        """
        # Clone inputs so downstream slicing/feature-head work never mutates
        # the caller's tensors.
        x_enc = x_enc.clone()
        x_mark_enc = x_mark_enc.clone()
        x_dec = x_dec[:, :, config.axis:-1].clone()
        x_mark_dec = x_mark_dec.clone()
        B = x_dec.size(0)

        # -------- Encoder --------
        if config.useHead:
            enc_input = self.FeatureEncoder(x_enc, x_mark_enc, x_dec, x_mark_dec)
        else:
            enc_input = torch.cat((x_enc, x_mark_enc), dim=-1)
        _, (h, c) = self.encoder(enc_input)

        # -------- Decoder --------
        # Zero-initialized placeholder inputs. Allocate directly on the target
        # device instead of building on CPU and copying via .to().
        dec_input = torch.zeros(
            B, self.forecast_window, config.outputSize, device=x_dec.device
        )
        dec_output, _ = self.decoder(dec_input, (h, c))

        dec_output = self.ln(dec_output)
        output = self.fc(dec_output)  # [B, forecast_window, output_dim]

        return output



# class Lstm(nn.Module):
#     def __init__(self, args):
#         super(Lstm, self).__init__()
#         self.hidden_dim = args.d_model
#         self.num_layers = args.num_layers
#         # self.feature_size = args.head_dim
#         # self.feature_size = args.feature_size+config.stampSize
#         self.feature_size = args.feature_size-5+config.stampSize
#         self.output_dim = config.outputSize + config.nll
#         self.lstm = nn.LSTM(
#             input_size=self.feature_size,
#             hidden_size=args.d_model,
#             num_layers=args.num_layers,
#             batch_first=True,
#             dropout=args.dropout if args.num_layers > 1 else 0.0
#         )
#
#         # 修改2：调整输出层结构
#         self.fc = nn.Sequential(
#             nn.Linear(self.hidden_dim, self.hidden_dim // 2),
#             nn.ReLU(),
#             nn.Linear(self.hidden_dim // 2, self.output_dim)
#         )
#         self.ln = nn.LayerNorm(self.hidden_dim)
#         self.FeatureEncoder = FeatureEncoder(args)
#
#     def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
#         x_enc = x_enc.clone()
#         x_mark_enc = x_mark_enc.clone()
#         x_dec = x_dec[:,:,config.axis:-1].clone()
#         x_mark_dec = x_mark_dec.clone()
#         input = self.FeatureEncoder(x_enc, x_mark_enc, x_dec, x_mark_dec)
#         # 应用层归一化
#         output, _ = self.lstm(input)
#         output = self.ln(output)
#
#         output = self.fc(output[:, -config.forecast_window:, :])
#         return output


if __name__ == "__main__":
    # Module is intended to be imported; no standalone entry-point behavior.
    pass