import math
import sys

import torch
import torch.nn as nn
from torch.distributions.normal import Normal
import numpy as np
from orbitP.layers.PathFormer_layers import AMS,WeightGenerator,CustomLinear,RevIN
from orbitP.layers.FeatureEncoder import FeatureEncoder
from orbitP.script import config

class PathFormer(nn.Module):
    """Multi-scale forecasting model that routes the encoded input through a
    stack of AMS (adaptive mixture-of-experts) layers.

    ``forward`` returns a tuple ``(out, balance_loss)`` where ``out`` is the
    forecast and ``balance_loss`` is the sum of the auxiliary expert
    load-balancing losses produced by each AMS layer.
    """

    def __init__(self, args):
        super(PathFormer, self).__init__()
        self.layer_nums = args.num_layers  # number of stacked AMS (pathway) layers
        self.feature_size = args.head_dim
        self.pre_len = config.forecast_window  # forecast horizon length
        self.seq_len = config.training_length  # encoder input sequence length
        self.k = args.k  # top-k experts selected inside each AMS layer
        self.num_experts_list = args.num_experts_list  # experts per layer
        self.patch_size_list = args.patch_size_list  # patch size per layer
        self.d_model = args.d_model
        self.d_ff = args.d_ff
        self.residual_connection = args.residual_connection
        self.revin = args.revin
        if self.revin:
            # Reversible instance normalization: 'norm' on the way in,
            # 'denorm' applied to the forecast on the way out.
            self.revin_layer = RevIN(num_features=self.feature_size, affine=False, subtract_last=False)

        # Lift each scalar feature value into a d_model-dimensional embedding.
        self.start_fc = nn.Linear(in_features=1, out_features=self.d_model)
        self.AMS_lists = nn.ModuleList()
        self.device = torch.device(config.device)
        self.batch_norm = args.batch_norm

        for num in range(self.layer_nums):
            self.AMS_lists.append(
                AMS(self.seq_len, self.seq_len, self.num_experts_list[num], self.device, k=self.k,
                    num_nodes=self.feature_size, patch_size=self.patch_size_list[num], noisy_gating=True,
                    d_model=self.d_model, d_ff=self.d_ff, layer_number=num + 1,
                    residual_connection=self.residual_connection, batch_norm=self.batch_norm))
        # Project the flattened (seq_len * d_model) per-feature representation
        # onto the forecast horizon.
        self.projections = nn.Linear(self.seq_len * self.d_model, self.pre_len)
        self.outputLayer = nn.Linear(self.feature_size, config.outputSize, bias=True)
        self.FeatureEncoder = FeatureEncoder(args)

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        """Run one forecasting pass.

        Args:
            x_enc: encoder input series.
            x_mark_enc: encoder time-stamp features.
            x_dec: decoder input; channels ``config.axis`` up to (but not
                including) the last one are kept before encoding.
            x_mark_dec: decoder time-stamp features.

        Returns:
            tuple: ``(out, balance_loss)`` — the forecast after the output
            projection, and the accumulated auxiliary loss from all AMS layers.
        """
        # Clone defensively so the caller's tensors are never mutated in place.
        x_enc = x_enc.clone()
        x_mark_enc = x_mark_enc.clone()
        # Drop the leading `config.axis` channels and the trailing channel of
        # the decoder input — presumably position/target columns; verify
        # against the data pipeline.
        x_dec = x_dec[:, :, config.axis:-1].clone()
        x_mark_dec = x_mark_dec.clone()
        # `features` renamed from `input` to avoid shadowing the builtin.
        features = self.FeatureEncoder(x_enc, x_mark_enc, x_dec, x_mark_dec)
        balance_loss = 0
        # norm
        if self.revin:
            features = self.revin_layer(features, 'norm')
        # unsqueeze(-1) + Linear(1, d_model): each scalar becomes a d_model
        # vector. Assumes FeatureEncoder output is (batch, seq_len,
        # feature_size) — TODO confirm.
        out = self.start_fc(features.unsqueeze(-1))
        batch_size = features.shape[0]

        for layer in self.AMS_lists:
            out, aux_loss = layer(out)
            balance_loss += aux_loss
        # (batch, feature, seq_len * d_model), then project to the horizon and
        # move the time axis back to dim 1.
        out = out.permute(0, 2, 1, 3).reshape(batch_size, self.feature_size, -1)
        out = self.projections(out).transpose(2, 1)
        # denorm
        if self.revin:
            out = self.revin_layer(out, 'denorm')
        out = self.outputLayer(out)
        return out, balance_loss


