import math
import pickle

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import TransformerEncoder, TransformerDecoder

# import dgl.nn
# from dgl.nn.pytorch import GATConv
from ext.layers.gltcn import *
from ext.layers.dlutils import *
from ext.layers.spectre import SPECTRELayer
from ext.layers.RevIN import RevIN


# class DTAAD_SpectrePostGLv2MSH_Mod(nn.Module):
class Model(nn.Module):
    """DTAAD variant with multi-scale heads (MSH) and AGSR self-conditioning.

    Three temporal scales (fine L, coarse L/2, coarse L/4) plus:

    - RevIN normalization (normalize the input, denormalize the outputs).
    - Coarse branches: ``AvgPool1d -> MHA -> FFN`` followed by transposed-conv
      upsampling back to the window length.
    - Three-scale gated fusion: temperature-controlled softmax over scales,
      with the gate bias initialized to favor the fine scale.
    - Season/Trend decomposition and fusion: seasonality (SPECTRE) + trend
      (multi-kernel average pooling) + the raw window, merged by a small FFN.
    - Learnable residual coefficient ``lambda_resid`` (init 0.7) on the
      fusion residual.
    - AGSR self-conditioning and per-sample weighting.

    Output shapes match the original training pipeline: ``(1, B, D)``.
    """

    def __init__(self, configs):
        """Build the model.

        Args:
            configs: namespace-like config object (attribute access, not a
                plain dict) providing at least ``task_name``, ``post_process``,
                ``enc_in`` (feature count D) and ``seq_len`` (window length L).
        """
        super().__init__()
        self.name = 'DTAAD_SpectrePostGLv2MSH_Mod'
        self.configs = configs
        self.task_name = configs.task_name
        self.post_process = configs.post_process

        feats = configs.enc_in
        self.n_feats = feats
        # Keep the original window length so the training pipeline is unaffected.
        self.n_window = configs.seq_len

        # RevIN instance normalization (per-feature affine parameters).
        self.revin = RevIN(num_features=feats, affine=True, subtract_last=False)

        # Local / global TCN encoders.
        self.l_tcn = Tcn_Local(num_outputs=feats, kernel_size=4, dropout=0.2)
        self.g_tcn = ModernTCN_Global(input_len=self.n_window,
                                      output_dim=feats,
                                      num_vars=feats)

        # Positional encoding + one transformer encoder per branch.
        self.pos_encoder = PositionalEncoding(feats, 0.1, self.n_window)
        _enc_heads = 1
        enc_layer_local = TransformerEncoderLayer(d_model=feats, nhead=_enc_heads,
                                                  dim_feedforward=16, dropout=0.1)
        self.transformer_encoder1 = TransformerEncoder(enc_layer_local, num_layers=1)
        enc_layer_global = TransformerEncoderLayer(d_model=feats, nhead=_enc_heads,
                                                   dim_feedforward=16, dropout=0.1)
        self.transformer_encoder = TransformerEncoder(enc_layer_global, num_layers=1)

        # Coarse branches (MSH): two coarse scales, s1: x2 and s2: x4 downsampling.
        self.pool2 = nn.AvgPool1d(kernel_size=2, stride=2, ceil_mode=True)
        self.coarse1_mha = nn.MultiheadAttention(embed_dim=feats, num_heads=_enc_heads, dropout=0.1)
        self.coarse1_ffn = nn.Sequential(nn.Linear(feats, feats), nn.GELU(), nn.Linear(feats, feats))
        self.coarse1_norm1 = nn.LayerNorm(feats)
        self.coarse1_norm2 = nn.LayerNorm(feats)
        self.coarse1_up = nn.ConvTranspose1d(feats, feats, kernel_size=2, stride=2)

        self.pool4 = nn.AvgPool1d(kernel_size=4, stride=4, ceil_mode=True)
        self.coarse2_mha = nn.MultiheadAttention(embed_dim=feats, num_heads=_enc_heads, dropout=0.1)
        self.coarse2_ffn = nn.Sequential(nn.Linear(feats, feats), nn.GELU(), nn.Linear(feats, feats))
        self.coarse2_norm1 = nn.LayerNorm(feats)
        self.coarse2_norm2 = nn.LayerNorm(feats)
        self.coarse2_up = nn.ConvTranspose1d(feats, feats, kernel_size=4, stride=4)

        # Three-scale gated fusion (temperature-controlled softmax).
        self.gate3_mlp = nn.Linear(3 * feats, 3 * feats)
        with torch.no_grad():
            if getattr(self.gate3_mlp, 'bias', None) is not None:
                # The gate logits are later reshaped with view(B, L, D, 3), so
                # output unit i maps to (feature i // 3, scale i % 3) and the
                # fine scale is s == 0.  Bias every third unit to favor the
                # fine scale.  (The previous `bias[:feats]` slice instead
                # boosted all three scales of only the first feats // 3
                # features, contradicting the stated intent.)
                self.gate3_mlp.bias[0::3].add_(1.0)
        self.gate_temp = 1.0  # may be annealed via set_gate_temperature()

        # Frequency branch used as the seasonality component.
        self.spectre_freq = SPECTRELayer(
            d_model=feats,
            n_heads=_enc_heads,
            max_seq_len=self.n_window,
            use_wavelet=False,
            share_gates=False,
            memory_len=0,
        )

        # Trend branch: multi-kernel average pooling with softmax weights.
        # Only odd kernels that do not exceed the window length are used.
        ks = [k for k in [3, 5, 7] if k <= self.n_window]
        self.trend_pools = nn.ModuleList([
            nn.AvgPool1d(kernel_size=k, stride=1, padding=k//2, count_include_pad=False)
            for k in ks
        ])
        self.trend_alpha = nn.Parameter(torch.zeros(len(self.trend_pools), dtype=torch.double))

        # Season/Trend fusion: seasonality projection + fusion FFN.
        self.st_evo_proj = nn.Linear(feats, feats)
        self.st_fuse = nn.Sequential(
            nn.Linear(3 * feats, feats),
            nn.GELU(),
            nn.Linear(feats, feats),
        )

        # Channel embedding + residual LayerNorm.
        self.channel_embed = ChannelEmbed(in_channels=feats,
                                          out_channels=feats,
                                          reduction=1,
                                          norm_layer=nn.BatchNorm1d)
        self.fuse_norm = nn.LayerNorm(feats)

        # Per-branch decoders (window -> single step) and shared projection.
        self.decoder1 = nn.Sequential(nn.Linear(self.n_window, 1), nn.Sigmoid())
        self.decoder2 = nn.Sequential(nn.Linear(self.n_window, 1), nn.Sigmoid())
        self.fcn = nn.Linear(feats, feats)

        # AGSR: self-conditioning gate and association-based sample weight.
        self.sigma_head = nn.Sequential(nn.Linear(feats, feats), nn.Sigmoid())
        self.beta_sc = 1.0
        hidden_mlp = max(1, feats // 2)
        self.assoc_mlp = nn.Sequential(
            nn.Linear(2 * feats, hidden_mlp),
            nn.GELU(),
            nn.Linear(hidden_mlp, 1),
            nn.Sigmoid(),
        )
        self.tau = 1.0
        self._w_last = None  # most recent per-sample weights, detached

        # Learnable residual coefficient (double precision).
        self.lambda_resid = nn.Parameter(torch.tensor(0.7, dtype=torch.double))

        # Align every parameter/buffer dtype with the double-precision pipeline.
        self.double()

    def _encode_memory_from_g(self, g: torch.Tensor) -> torch.Tensor:
        """Encode the global TCN output into a transformer memory.

        Args:
            g: (B, D, L) global-branch features.

        Returns:
            (L, B, D) encoded memory.
        """
        mem = g.permute(2, 0, 1) * math.sqrt(self.n_feats)
        mem = self.pos_encoder(mem)
        mem = self.transformer_encoder(mem)
        return mem

    def _match_length(self, x: torch.Tensor, target_len: int) -> torch.Tensor:
        """Truncate or right-zero-pad the last dimension to ``target_len``."""
        cur_len = x.size(-1)
        if cur_len > target_len:
            x = x[..., :target_len]
        elif cur_len < target_len:
            pad = target_len - cur_len
            x = F.pad(x, (0, pad))
        return x

    def _coarse_branch_s1(self, g: torch.Tensor) -> torch.Tensor:
        """Coarse scale s1: x2 downsample, MHA + FFN, transposed-conv upsample.

        Args:
            g: (B, D, L) global-branch features.

        Returns:
            (B, L, D) features restored to the full window length.
        """
        pooled = self.pool2(g)                                 # (B, D, Lc)
        tokens = pooled.permute(2, 0, 1).contiguous()          # (Lc, B, D)
        tokens = self.pos_encoder(tokens)
        attn_out, _ = self.coarse1_mha(tokens, tokens, tokens, need_weights=False)
        tokens = self.coarse1_norm1(tokens + attn_out)
        ff = self.coarse1_ffn(tokens)
        tokens = self.coarse1_norm2(tokens + ff)               # (Lc, B, D)
        up = tokens.permute(1, 2, 0).contiguous()              # (B, D, Lc)
        up = self.coarse1_up(up)                               # (B, D, L')
        up = self._match_length(up, self.n_window)             # (B, D, L)
        return up.permute(0, 2, 1).contiguous()                # (B, L, D)

    def _coarse_branch_s2(self, g: torch.Tensor) -> torch.Tensor:
        """Coarse scale s2: x4 downsample, MHA + FFN, transposed-conv upsample.

        Args:
            g: (B, D, L) global-branch features.

        Returns:
            (B, L, D) features restored to the full window length.
        """
        pooled = self.pool4(g)
        tokens = pooled.permute(2, 0, 1).contiguous()
        tokens = self.pos_encoder(tokens)
        attn_out, _ = self.coarse2_mha(tokens, tokens, tokens, need_weights=False)
        tokens = self.coarse2_norm1(tokens + attn_out)
        ff = self.coarse2_ffn(tokens)
        tokens = self.coarse2_norm2(tokens + ff)
        up = tokens.permute(1, 2, 0).contiguous()
        up = self.coarse2_up(up)
        up = self._match_length(up, self.n_window)
        return up.permute(0, 2, 1).contiguous()

    def _seasonality_branch_bld(self, src_bdl: torch.Tensor) -> torch.Tensor:
        """Seasonality branch: (B, D, L) -> (B, L, D) via the SPECTRE layer."""
        y_in = src_bdl.permute(0, 2, 1).contiguous()           # (B, L, D)
        y, _ = self.spectre_freq(y_in)                         # (B, L, D)
        return y

    def _trend_branch_bld(self, bld: torch.Tensor) -> torch.Tensor:
        """Trend branch: softmax-weighted multi-kernel average pooling.

        Args:
            bld: (B, L, D) fused temporal features.

        Returns:
            (B, L, D) trend estimate; the input unchanged when no pooling
            kernel fits the window.
        """
        if len(self.trend_pools) == 0:
            return bld
        x = bld.permute(0, 2, 1).contiguous()  # (B, D, L)
        outs = []
        for pool in self.trend_pools:
            outs.append(pool(x))               # stride 1 + padding keep L fixed
        outs = torch.stack(outs, dim=0)        # (K, B, D, L)
        alpha = torch.softmax(self.trend_alpha, dim=0).view(-1, 1, 1, 1)
        trend = (alpha * outs).sum(dim=0)      # (B, D, L)
        return trend.permute(0, 2, 1).contiguous()  # (B, L, D)

    def _forward(self, src: torch.Tensor):
        """Core forward pass.

        Args:
            src: (B, D, L) input window.

        Returns:
            Tuple ``(x1, x2)`` of denormalized predictions, each shaped
            (1, B, D): local-branch and global-branch reconstructions.
        """
        # RevIN normalization (performed in B, L, D layout).
        src_bld = src.permute(0, 2, 1).contiguous()          # (B, L, D)
        src_bld = self.revin(src_bld, mode='norm')
        src_norm = src_bld.permute(0, 2, 1).contiguous()     # (B, D, L)

        # 1) Local branch -> x1.
        l = self.l_tcn(src_norm)                             # (B, D, L)
        src1 = l.permute(2, 0, 1) * math.sqrt(self.n_feats)  # (L, B, D)
        src1 = self.pos_encoder(src1)
        z1 = self.transformer_encoder1(src1)                 # (L, B, D)
        c1 = z1 + self.fcn(z1)
        x1 = self.decoder1(c1.permute(1, 2, 0))              # (B, D, 1)

        # 2) AGSR self-conditioning on the normalized stream.
        B, D, L = src_norm.shape
        z1_pool = z1.mean(dim=0)                             # (B, D)
        sigma = self.sigma_head(z1_pool)                     # (B, D)
        gate_sc = sigma.unsqueeze(-1).expand(B, D, L)        # (B, D, L)
        x1_rep = x1.expand(B, D, L)                          # (B, D, L)
        src2 = src_norm + self.beta_sc * gate_sc * x1_rep    # (B, D, L)

        # 3) Global memory and multi-scale fusion (fine + two coarse scales).
        g = self.g_tcn(src2)                                 # (B, D, L)
        fine_mem = self._encode_memory_from_g(g)             # (L, B, D)
        fine_bld = fine_mem.permute(1, 0, 2).contiguous()    # (B, L, D)
        coarse1_bld = self._coarse_branch_s1(g)              # (B, L, D)
        coarse2_bld = self._coarse_branch_s2(g)              # (B, L, D)

        # Three-scale gating: concat -> linear -> reshape -> softmax(temperature).
        gate_in = torch.cat([fine_bld, coarse1_bld, coarse2_bld], dim=-1)  # (B, L, 3D)
        logits = self.gate3_mlp(gate_in)                                   # (B, L, 3D)
        logits = logits.view(B, L, D, 3)
        gate_w = torch.softmax(logits / self.gate_temp, dim=-1)            # (B, L, D, 3)
        fused_time_bld = (
            gate_w[..., 0] * fine_bld +
            gate_w[..., 1] * coarse1_bld +
            gate_w[..., 2] * coarse2_bld
        )                                                                   # (B, L, D)

        # 4) Season/Trend decomposition and fusion (seasonality + trend + raw window).
        sea_bld = self._seasonality_branch_bld(src_norm)    # (B, L, D)
        sea_proj = self.st_evo_proj(sea_bld)                # (B, L, D)
        trend_bld = self._trend_branch_bld(fused_time_bld)  # (B, L, D)
        st_fused_bld = self.st_fuse(torch.cat([src_bld, sea_proj, trend_bld], dim=-1))  # (B, L, D)

        # 5) Channel embedding + residual LayerNorm
        #    (the residual comes from the temporal multi-scale fusion).
        embedded = self.channel_embed(st_fused_bld, N=self.n_window)       # (B, L, D)
        fused_time = fused_time_bld.permute(1, 0, 2).contiguous()          # (L, B, D)
        fused_memory = self.fuse_norm(embedded.permute(1, 0, 2).contiguous() + self.lambda_resid * fused_time)  # (L, B, D)

        # 6) Decode the global branch -> x2.
        z2 = self.fcn(fused_memory)
        c2 = z2 + self.fcn(z2)
        x2 = self.decoder2(c2.permute(1, 2, 0))              # (B, D, 1)

        # 7) Per-sample association weights (stored for external use).
        mem_pool = fused_time.mean(dim=0)                    # (B, D)
        freq_pool = sea_bld.mean(dim=1)                      # (B, D)
        assoc_vec = torch.cat([mem_pool, freq_pool], dim=-1) # (B, 2D)
        w = self.assoc_mlp(assoc_vec).view(-1)               # (B,)
        if self.tau != 1.0:
            # Temperature-sharpened sigmoid; clamp keeps logit finite.
            w = torch.sigmoid(torch.logit(w.clamp(1e-6, 1 - 1e-6)) / self.tau)
        self._w_last = w.detach()

        # RevIN denormalization: (B, D, 1) -> (B, 1, D) for denorm, then back.
        x1_den = self.revin(x1.permute(0, 2, 1).contiguous(), mode='denorm').permute(0, 2, 1).contiguous()
        x2_den = self.revin(x2.permute(0, 2, 1).contiguous(), mode='denorm').permute(0, 2, 1).contiguous()

        return x1_den.permute(2, 0, 1), x2_den.permute(2, 0, 1)

    def set_gate_temperature(self, value: float) -> None:
        """Set the three-scale gate softmax temperature, clamped to > 0."""
        self.gate_temp = max(float(value), 1e-3)

    def forward(self, src, x_mark_enc=None, x_dec=None, x_mark_dec=None, mask=None):
        """Task dispatch wrapper.

        For anomaly detection with a supported post-process mode, returns a
        dict ``{"x1", "x2"}`` while training and ``x2`` alone in eval mode;
        any other configuration returns ``None``.
        """
        if self.task_name == 'anomaly_detection':
            if self.post_process in ['anomaly_v1', 'anomaly_v3']:
                src = src.permute(0, 2, 1)
                x1, x2 = self._forward(src)
                if self.training:
                    return {"x1": x1, "x2": x2}
                else:
                    return x2
        return None

    @staticmethod
    def custom_loss(input, target):
        """MSE loss: plain for a tensor, weighted x1/x2 blend for a dict."""
        if isinstance(input, torch.Tensor):
            loss = F.mse_loss(input, target)
        else:
            _lambda = 0.8  # weight of the local-branch reconstruction
            x1 = input["x1"]
            x2 = input["x2"]
            loss = _lambda * F.mse_loss(x1, target) + (1 - _lambda) * F.mse_loss(x2, target)
        return loss
