import torch.nn as nn
import torch, math
import time
from orbitP.script import config
import numpy as np
from orbitP.layers.FeatureEncoder import FeatureEncoder

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes a ``[max_len, d_model]`` table of interleaved sin/cos
    encodings once at construction.  ``forward`` does NOT add the encoding
    to its input; it returns the ``[1, T, d_model]`` slice matching the
    input's sequence length, and the caller adds it.
    """

    def __init__(self, d_model, device, max_len=5000):
        """
        Args:
            d_model: embedding dimension (even or odd both supported).
            device: device the returned encodings are moved to in forward().
            max_len: maximum supported sequence length.
        """
        super().__init__()
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # [max_len, 1]
        # Frequencies for the sin channels: ceil(d_model / 2) entries.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        # The cos channels occupy only floor(d_model / 2) columns, so slice
        # div_term accordingly — otherwise an odd d_model raises a
        # shape-mismatch RuntimeError here.
        pe[:, 1::2] = torch.cos(position * div_term[: d_model // 2])
        self.register_buffer('pe', pe)  # registered buffer: saved with the module, never trained
        self.device = device

    def forward(self, x):
        """Return positional encodings sized to x.

        Args:
            x: [B, T, d_model] — only T (the sequence length) is read.

        Returns:
            Tensor of shape [1, T, d_model] on ``self.device``, ready to be
            broadcast-added to the caller's embeddings.
        """
        T = x.size(1)
        pe = self.pe[:T, :].unsqueeze(0)  # [1, T, d_model]
        return pe.to(self.device)


class Transformer(nn.Module):
    """Encoder-only Transformer forecaster.

    Fuses the encoder/decoder input streams through ``FeatureEncoder``,
    runs a standard ``nn.TransformerEncoder`` over the fused sequence,
    mean-pools over time, and projects the pooled vector to a flat
    forecast reshaped to ``[B, predicting_length, output_size]``.
    """

    def __init__(self, args):
        super().__init__()
        self.feature_size = args.head_dim
        # self.feature_size = args.feature_size+config.stampSize
        self.output_size = config.outputSize+config.nll
        self.seq_len = config.training_length
        self.pred_len = config.predicting_length
        self.focst_len = config.forecast_window
        self.pos_encoder = PositionalEncoding(args.d_model, config.device)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=args.d_model, nhead=args.k,
                                                        dim_feedforward=args.d_ff, dropout=args.dropout, batch_first=True)
        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=args.num_layers)
        self.input_proj = nn.Linear(self.feature_size, args.d_model)
        self.output_proj = nn.Linear(args.d_model, config.predicting_length * self.output_size)
        self.FeatureEncoder = FeatureEncoder(args)

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        """Produce a forecast from encoder/decoder inputs.

        Args:
            x_enc, x_mark_enc: encoder values and their time-stamp marks.
            x_dec, x_mark_dec: decoder values and their time-stamp marks.

        Returns:
            Tensor of shape [B, pred_len, output_size].
        """
        # Clone so any in-place work downstream cannot leak back into the
        # caller's tensors.
        x_enc = x_enc.clone()
        x_mark_enc = x_mark_enc.clone()
        # Keep decoder channels from config.axis up to (excluding) the last
        # one — NOTE(review): presumably the target columns; confirm against
        # the data loader.
        x_dec = x_dec[:, :, config.axis:-1].clone()
        x_mark_dec = x_mark_dec.clone()

        fused = self.FeatureEncoder(x_enc, x_mark_enc, x_dec, x_mark_dec)  # [B, L, N]
        batch = fused.shape[0]
        # Project to d_model and add sinusoidal positions (pos_encoder only
        # reads the sequence length of its argument).
        fused = self.input_proj(fused) + self.pos_encoder(fused)

        # Global average pooling over the time dimension.
        pooled = self.encoder(fused).mean(dim=1)  # [B, d_model]

        # Project the pooled vector and reshape to the forecast layout.
        flat = self.output_proj(pooled)  # [B, pred_len * output_size]
        return flat.view(batch, self.pred_len, self.output_size)
