# pathformer.py
import torch
import torch.nn as nn
import pytorch_lightning as pl
from layers.AMS import AMS
from layers.RevIN import RevIN


class ModelConfig:
    """Hyper-parameter container for PathFormer.

    Groups the time-series dimensions and the architecture knobs that the
    model constructor reads; all values are plain attributes.
    """

    def __init__(self):
        # --- time-series dimensions ---
        self.seq_len = 672    # length of the input window
        self.pred_len = 24    # forecasting horizon (steps ahead)
        self.num_nodes = 12   # number of variables / feature channels

        # --- architecture ---
        self.layer_nums = 4                    # number of stacked AMS blocks
        self.num_experts_list = [4, 4, 4, 4]   # experts per AMS block
        self.patch_size_list = [24, 12, 6, 3]  # multi-scale patch sizes
        self.d_model = 256                     # hidden embedding width
        self.d_ff = 512                        # feed-forward width
        self.residual_connection = True        # residual skips inside AMS

        # Prefer the GPU when one is visible, otherwise fall back to CPU.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'


class PathFormer(pl.LightningModule):
    """PathFormer: stacked AMS (adaptive multi-scale) blocks wrapped in RevIN.

    Pipeline: RevIN-normalize the input series, embed each scalar observation
    to ``d_model``, run ``layer_nums`` AMS blocks (each contributing an
    auxiliary load-balancing loss), project each node's flattened
    representation to the forecast horizon, and RevIN-denormalize the result.
    """

    def __init__(self, configs):
        super().__init__()
        self.save_hyperparameters()
        self.configs = configs

        # RevIN normalizes each instance on the way in ('norm') and restores
        # its statistics on the way out ('denorm').
        self.revin = RevIN(num_features=configs.num_nodes)
        # Lift every scalar observation to a d_model-dimensional embedding.
        self.start_fc = nn.Linear(1, configs.d_model)

        # Stack of AMS blocks, one per entry in num_experts_list.
        # NOTE(review): the FULL patch_size_list is handed to every layer —
        # presumably each of the layer's experts works at one of those scales
        # (4 sizes match 4 experts per layer); confirm against the AMS
        # constructor, otherwise patch_size_list[i] may have been intended.
        self.ams_layers = nn.ModuleList([
            AMS(input_size=configs.seq_len,
                output_size=configs.seq_len,
                num_experts=configs.num_experts_list[i],
                device=configs.device,
                patch_size=configs.patch_size_list,
                d_model=configs.d_model,
                d_ff=configs.d_ff,
                residual_connection=configs.residual_connection)
            for i in range(configs.layer_nums)
        ])

        # Per-node head: flattened (seq_len * d_model) features -> pred_len.
        self.projection = nn.Sequential(
            nn.Linear(configs.seq_len * configs.d_model, configs.pred_len),
            nn.LayerNorm(configs.pred_len)
        )

        # Built once here instead of per training step (the original
        # instantiated nn.HuberLoss() on every call).
        self.criterion = nn.HuberLoss()

    def forward(self, x):
        """Forecast ``pred_len`` steps ahead.

        Args:
            x: input series, shape (batch, seq_len, num_nodes).

        Returns:
            Tuple ``(pred, balance_loss)`` where ``pred`` has shape
            (batch, pred_len, num_nodes) and ``balance_loss`` is the summed
            auxiliary loss of all AMS layers.
        """
        # Reversible instance normalization; statistics restored at the end.
        x = self.revin(x, 'norm')
        # (B, L, N) -> (B, L, N, 1) -> (B, L, N, d_model)
        x = self.start_fc(x.unsqueeze(-1))

        # Multi-scale processing; accumulate each layer's auxiliary loss.
        balance_loss = 0
        for ams_layer in self.ams_layers:
            x, aux_loss = ams_layer(x)
            balance_loss += aux_loss

        # BUG FIX: the original reshape(x.size(0), -1) flattened the node
        # dimension into the features, yielding (B, N*L*d_model), which does
        # not match nn.Linear(seq_len * d_model, pred_len); it then called
        # denorm on a (B, pred_len) tensor that RevIN(num_features=N) cannot
        # process. Project per node and restore the (B, pred_len, N) layout.
        # Assumes AMS outputs (B, L, N, d_model) — TODO confirm against AMS.
        batch_size = x.size(0)
        x = x.permute(0, 2, 1, 3).reshape(batch_size, self.configs.num_nodes, -1)
        x = self.projection(x)   # (B, N, pred_len)
        x = x.permute(0, 2, 1)   # (B, pred_len, N) — layout RevIN expects
        x = self.revin(x, 'denorm')
        return x, balance_loss

    def training_step(self, batch, batch_idx):
        """One optimization step: Huber loss plus weighted balance loss."""
        x, y = batch
        pred, balance_loss = self(x)
        main_loss = self.criterion(pred, y)
        # 0.1 weights the experts' load-balancing auxiliary term.
        total_loss = main_loss + balance_loss * 0.1
        self.log('train_loss', total_loss, prog_bar=True)
        return total_loss

    def configure_optimizers(self):
        """AdamW with ReduceLROnPlateau stepped per epoch on train_loss."""
        optimizer = torch.optim.AdamW(self.parameters(), lr=1e-4)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, patience=5, factor=0.5)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "monitor": "train_loss",
                "interval": "epoch"
            }
        }