"""
只保留时间注意力的 GMAN_PDFusion 消融模型。

该变体移除了空间注意力及相关组件，仅使用时间注意力堆叠和
transformAttention 进行历史到未来的特征映射，便于进行时间模块的
独立性能评估。
"""

from __future__ import annotations

from typing import Any, Dict

import torch
import torch.nn as nn
import torch.nn.functional as F

from .gman_components import FC, STEmbedding, temporalAttention, transformAttention


class TemporalAttBlock(nn.Module):
    """Residual block built around a single temporal-attention layer.

    Computes ``X + temporalAttention(X, STE)`` so the attention output acts as
    an additive refinement of the input features.
    """

    def __init__(self, K: int, d: int, bn_decay: float, *, mask: bool = False) -> None:
        super().__init__()
        self.temporal_attention = temporalAttention(K, d, bn_decay, mask=mask)

    def forward(self, X: torch.Tensor, STE: torch.Tensor) -> torch.Tensor:
        # Residual connection around the temporal-attention output.
        return X + self.temporal_attention(X, STE)


class GMANTemporalOnly(nn.Module):
    """GMAN variant that keeps only the temporal-attention pathway.

    Hyper-parameters mirror the original GMAN_PDFusion configuration, but no
    spatial patterns or adjacency information are loaded; history features are
    mapped to the prediction horizon purely through stacked temporal attention
    and a transform-attention bridge.
    """

    def __init__(self, config: Dict[str, Any], num_nodes: int) -> None:
        super().__init__()
        self.config = config
        self.num_nodes = num_nodes

        # Horizon lengths; "history_len" takes precedence over legacy "num_his".
        self.num_pred = int(config.get("num_pred", 12))
        self.num_his = int(config.get("history_len", config.get("num_his", 12)))

        # Attention geometry: L stacked blocks, K heads of width d each.
        self.L = int(config.get("L", 3))
        self.K = int(config.get("K", 8))
        self.d = int(config.get("d", 8))
        self.D = self.K * self.d
        self.bn_decay = float(config.get("bn_decay", 0.1))
        self.time_steps_per_day = int(config.get("time_steps_per_day", 24))
        use_mask = bool(config.get("temporal_mask", False))

        # Learnable per-node embedding standing in for graph-derived features.
        self.SE = nn.Parameter(torch.randn(num_nodes, self.D))
        nn.init.xavier_uniform_(self.SE)

        self.STEmbedding = STEmbedding(
            self.D,
            self.bn_decay,
            time_steps_per_day=self.time_steps_per_day,
        )

        def build_stack() -> nn.ModuleList:
            # One residual temporal-attention block per layer.
            return nn.ModuleList(
                TemporalAttBlock(self.K, self.d, self.bn_decay, mask=use_mask)
                for _ in range(self.L)
            )

        self.encoder_blocks = build_stack()
        self.transformAttention = transformAttention(self.K, self.d, self.bn_decay)
        self.decoder_blocks = build_stack()

        # Input lift 1 -> D and output head D -> 1.
        self.FC_1 = FC(
            input_dims=[1, self.D],
            units=[self.D, self.D],
            activations=[F.relu, None],
            bn_decay=self.bn_decay,
        )
        self.FC_2 = FC(
            input_dims=[self.D, self.D],
            units=[self.D, 1],
            activations=[F.relu, None],
            bn_decay=self.bn_decay,
        )

    def forward(self, X: torch.Tensor, TE: torch.Tensor) -> torch.Tensor:
        """Map a [B, num_his, N] (or [B, num_his, N, 1]) history to [B, num_pred, N].

        ``TE`` carries the time encodings for both history and prediction steps;
        the combined spatio-temporal embedding is split at ``num_his``.
        """
        if X.dim() == 3:
            # Promote [B, P, N] to the channel-last layout [B, P, N, 1].
            X = X.unsqueeze(-1)
        elif X.dim() != 4 or X.size(-1) != 1:
            raise ValueError("输入 X 需为 [B, P, N] 或 [B, P, N, 1]")

        hidden = self.FC_1(X)
        ste = self.STEmbedding(self.SE, TE)
        ste_his, ste_pred = ste[:, : self.num_his], ste[:, self.num_his :]

        for encoder in self.encoder_blocks:
            hidden = encoder(hidden, ste_his)

        # Bridge history-aligned features onto the prediction time axis.
        hidden = self.transformAttention(hidden, ste_his, ste_pred)

        for decoder in self.decoder_blocks:
            hidden = decoder(hidden, ste_pred)

        return self.FC_2(hidden).squeeze(-1)


# Public API of this module: only the ablation model itself is exported.
__all__ = ["GMANTemporalOnly"]

