import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer
from layers.SelfAttention_Family import FullAttention, AttentionLayer
from layers.Embed import DataEmbedding

# ================ Wavelet core modules ================
class Splitting(nn.Module):
    """Split a channel-first sequence tensor [B, C, L] into even/odd time samples."""

    def even(self, x):
        # Time steps 0, 2, 4, ... along the last axis.
        return x[:, :, 0::2]

    def odd(self, x):
        # Time steps 1, 3, 5, ... along the last axis.
        return x[:, :, 1::2]

    def forward(self, x):
        # Returns (even_part, odd_part); together they tile the input.
        return self.even(x), self.odd(x)

# Learnable lifting scheme (predict/update filter pair).
class LiftingScheme(nn.Module):
    """One learnable lifting (wavelet) step over the last (time) axis.

    Splits the input into even/odd samples, applies an update filter U to the
    odd part to form the approximation c, then a predict filter P to c to form
    the detail d:

        c = x_even + U(x_odd)
        d = x_odd  - P(c)

    Args:
        channels: number of channels; filtering is depthwise (one filter each).
        kernel_size: temporal kernel size of the P/U convolutions.
    """

    def __init__(self, channels, kernel_size=3):
        super().__init__()
        # Asymmetric reflection padding keeps the sequence length unchanged
        # for even kernel sizes as well.
        pad = (kernel_size // 2, kernel_size - 1 - kernel_size // 2)
        self.P = nn.Sequential(
            nn.ReflectionPad1d(pad),
            nn.Conv1d(channels, channels, kernel_size, groups=channels),
            nn.GELU(),
            nn.InstanceNorm1d(channels)  # per-instance, per-channel normalisation
        )
        self.U = nn.Sequential(
            nn.ReflectionPad1d(pad),
            nn.Conv1d(channels, channels, kernel_size, groups=channels),
            nn.GELU(),
            nn.InstanceNorm1d(channels)
        )

    def forward(self, x):
        """Return (approximation, detail) for x of shape [batch, channels, L].

        Both outputs have length ceil(L / 2).
        """
        # Pad to even length so the even/odd split is balanced.
        if x.size(2) % 2 != 0:
            x = F.pad(x, (0, 1))
        # Fix: slice directly instead of constructing a throwaway Splitting
        # module on every forward call.
        x_even, x_odd = x[:, :, ::2], x[:, :, 1::2]
        c = x_even + self.U(x_odd)
        d = x_odd - self.P(c)
        return c, d
class AdpWaveletBlock(nn.Module):
    """Adaptive wavelet block over channel-first input [batch, enc_in, L].

    NOTE(review): DEAD CODE — this class is immediately shadowed by the second
    ``AdpWaveletBlock`` definition below (which works on ``d_model`` channels
    in channel-last layout). It is never instantiated; safe to delete once
    confirmed nothing else imports this first definition.
    """
    def __init__(self, configs):
        super().__init__()
        self.wavelet = LiftingScheme(configs.enc_in, configs.lifting_ks)
        self.norm = nn.InstanceNorm1d(configs.enc_in)
        self.regu_weight = configs.regu_weight

    def forward(self, x):
        c, d = self.wavelet(x)
        # Upsample the half-length approximation back to the input length.
        c = F.interpolate(c, size=x.size(2), mode='linear', align_corners=True)
        # Sparsity on detail coefficients + global mean consistency with x.
        regu_loss = self.regu_weight * (d.abs().mean() + torch.dist(c.mean(), x.mean()))
        return self.norm(c), regu_loss

class AdpWaveletBlock(nn.Module):
    """Adaptive lifting-wavelet block for [batch, seq_len, d_model] features.

    Runs a learnable lifting scheme along the time axis, upsamples the
    approximation branch back to the input length, and returns it together
    with a regularisation loss that keeps the detail branch sparse and the
    approximation close to the input in per-channel mean.
    """

    def __init__(self, configs):
        super().__init__()
        # The lifting scheme runs channel-first over d_model channels.
        self.wavelet = LiftingScheme(configs.d_model, configs.lifting_ks)
        self.norm = nn.InstanceNorm1d(configs.d_model)
        self.regu_weight = configs.regu_weight

    def forward(self, x):
        # [batch, seq_len, d_model] -> [batch, d_model, seq_len] for Conv1d.
        approx, detail = self.wavelet(x.permute(0, 2, 1))

        # Upsample the half-length approximation back to the input length,
        # normalise per channel, then restore the channel-last layout.
        approx = F.interpolate(approx, size=x.size(1), mode='linear', align_corners=True)
        approx = self.norm(approx).permute(0, 2, 1)

        # Sparsity on the detail coefficients + per-channel mean consistency.
        mean_gap = torch.dist(approx.mean(dim=(0, 1)), x.mean(dim=(0, 1)))
        regu_loss = self.regu_weight * (detail.abs().mean() + mean_gap)
        return approx, regu_loss


# ================ Improved Transformer ================
class Model(nn.Module):
    def __init__(self, configs):
        """Wavelet-augmented Transformer: interleaves lifting-wavelet blocks
        with self-attention encoder layers, then dispatches to task heads.

        Args:
            configs: experiment configuration; must provide task_name, enc_in,
                d_model, embed, freq, dropout, e_layers, factor, n_heads,
                d_ff, activation, lifting_ks, regu_weight (plus the
                task-specific fields read in _init_task_modules).
        """
        super().__init__()
        self.configs = configs
        self.task_name = configs.task_name

        # Input embedding (values + temporal marks).
        self.enc_embed = DataEmbedding(configs.enc_in, configs.d_model,
                                       configs.embed, configs.freq, configs.dropout)

        # One wavelet block per encoder layer; they alternate in forward().
        self.wavelet_layers = nn.ModuleList([
            AdpWaveletBlock(configs) for _ in range(configs.e_layers)
        ])
        self.encoder_layers = nn.ModuleList([
            EncoderLayer(
                AttentionLayer(
                    FullAttention(False, configs.factor, attention_dropout=configs.dropout),
                    configs.d_model, configs.n_heads),
                configs.d_model,
                configs.d_ff,
                dropout=configs.dropout,
                activation=configs.activation
            ) for _ in range(configs.e_layers)
        ])

        # Build the task-specific output modules (decoder / classifier head).
        self._init_task_modules(configs)
    # Task-specific module initialisation.
    def _init_task_modules(self, configs):
        """Create the task-specific tail modules based on self.task_name."""
        if self.task_name in ['long_term_forecast', 'short_term_forecast']:
            # Decoder components for forecasting.
            self.dec_embed = DataEmbedding(configs.dec_in, configs.d_model,
                                           configs.embed, configs.freq, configs.dropout)
            # NOTE(review): decoder depth reuses configs.e_layers; the usual
            # convention in this model family is configs.d_layers — confirm
            # whether this is intentional.
            self.decoder = Decoder(
                [
                    DecoderLayer(
                        AttentionLayer(
                            # True -> causal (masked) self-attention.
                            FullAttention(True, configs.factor, attention_dropout=configs.dropout),
                            configs.d_model, configs.n_heads),
                        AttentionLayer(
                            # Cross-attention over the encoder output.
                            FullAttention(False, configs.factor, attention_dropout=configs.dropout),
                            configs.d_model, configs.n_heads),
                        configs.d_model,
                        configs.d_ff,
                        dropout=configs.dropout,
                        activation=configs.activation
                    ) for _ in range(configs.e_layers)
                ],
                norm_layer=nn.LayerNorm(configs.d_model)
            )
            self.projection = nn.Linear(configs.d_model, configs.c_out)

        elif self.task_name == 'classification':
            # Flatten [batch, seq_len, d_model] and classify with a small MLP.
            self.head = nn.Sequential(
                nn.Flatten(),
                nn.Linear(configs.d_model * configs.seq_len, 256),
                nn.ReLU(),
                nn.Linear(256, configs.num_classes)
            )

    def forward(self, x_enc, x_mark_enc, x_dec=None, x_mark_dec=None):
        """Encode with alternating wavelet/attention layers, then run the task head.

        For the forecasting tasks, returns the last pred_len steps of the
        projected decoder output together with the accumulated wavelet
        regularisation loss.
        """
        enc_out = self.enc_embed(x_enc, x_mark_enc)
        total_regu = 0

        # Layer-by-layer processing: wavelet transform, then self-attention.
        for wavelet_layer, enc_layer in zip(self.wavelet_layers, self.encoder_layers):
            # Make sure enc_out is a tensor (an upstream layer may return a tuple).
            if isinstance(enc_out, tuple):
                enc_out = enc_out[0]  # take the main output

            # Wavelet transform; accumulate its regularisation loss.
            enc_out, regu_loss = wavelet_layer(enc_out)
            total_regu += regu_loss

            # Self-attention (the layer may return an (output, attn) tuple).
            enc_out = enc_layer(enc_out)
            if isinstance(enc_out, tuple):
                enc_out = enc_out[0]  # take the main output

        # Task-specific processing.
        if self.task_name in ['long_term_forecast', 'short_term_forecast']:
            dec_out = self.dec_embed(x_dec, x_mark_dec)
            output = self.decoder(dec_out, enc_out)
            output = self.projection(output)  # project d_model -> c_out
            return output[:, -self.configs.pred_len:, :], total_regu
        # NOTE(review): no visible branch for 'classification' even though
        # self.head is built in _init_task_modules — presumably handled in
        # code below this chunk; confirm it returns for that task.