import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
import numpy as np

from math import sqrt

# 1. Basic attention operation (full attention)
class FullAttention(nn.Module):
    '''
    Scaled dot-product attention over multi-head inputs.

    Expects queries/keys/values shaped [batch, seq_len, n_heads, head_dim]
    and returns the attended values with the same layout as the queries.
    '''
    def __init__(self, scale=None, attention_dropout=0.1):
        super(FullAttention, self).__init__()
        # Optional fixed scaling factor; falsy (None/0) means use 1/sqrt(head_dim).
        self.scale = scale
        # Dropout on the attention weights to regularize training.
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, queries, keys, values):
        # queries: [B, L, H, E]; keys/values: [B, S, H, E]/[B, S, H, D]
        head_dim = queries.shape[-1]
        scaling = self.scale if self.scale else 1. / sqrt(head_dim)

        # Compatibility score of every query against every key: [B, H, L, S]
        compat = torch.einsum("blhe,bshe->bhls", queries, keys)
        # Scale, normalize into attention weights, then apply dropout.
        weights = self.dropout(torch.softmax(scaling * compat, dim=-1))
        # Attention-weighted sum of values per query position: [B, L, H, D]
        attended = torch.einsum("bhls,bshd->blhd", weights, values)

        return attended.contiguous()  # guarantee contiguous memory for callers

# 2. Multi-head attention layer (MSA)
class AttentionLayer(nn.Module):
    '''
    Multi-head Self-Attention (MSA) layer.

    Projects [batch, seq_len, d_model] inputs into per-head query/key/value
    spaces, runs full attention, and merges the heads back to d_model.
    '''
    def __init__(self, d_model, n_heads, d_keys=None, d_values=None, dropout = 0.1):
        super(AttentionLayer, self).__init__()
        # Per-head dimensions default to an even split of the model width.
        d_keys = d_keys or d_model // n_heads
        d_values = d_values or d_model // n_heads

        # Core attention operation shared by all heads.
        self.inner_attention = FullAttention(scale=None, attention_dropout = dropout)
        # Input projections into the multi-head Q/K/V spaces.
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_model, d_keys * n_heads)
        self.value_projection = nn.Linear(d_model, d_values * n_heads)
        # Output projection merging all heads back to d_model.
        self.out_projection = nn.Linear(d_values * n_heads, d_model)
        self.n_heads = n_heads

    def forward(self, queries, keys, values):
        # queries: [B, L, d_model]; keys/values: [B, S, d_model]
        B, L, _ = queries.shape
        S = keys.shape[1]
        H = self.n_heads

        # Step 1: project inputs and split the feature axis into heads.
        q = self.query_projection(queries).view(B, L, H, -1)  # [B, L, H, d_keys]
        k = self.key_projection(keys).view(B, S, H, -1)       # [B, S, H, d_keys]
        v = self.value_projection(values).view(B, S, H, -1)   # [B, S, H, d_values]

        # Step 2: attend across the sequence, per head.
        attended = self.inner_attention(q, k, v)  # [B, L, H, d_values]

        # Step 3: merge heads and project back to model width.
        merged = attended.view(B, L, -1)  # [B, L, H*d_values]
        return self.out_projection(merged)  # [B, L, d_model]

# 3. Two-Stage Attention layer (TSA): the core Crossformer innovation
class TwoStageAttentionLayer(nn.Module):
    '''
    The Two Stage Attention (TSA) Layer.
    input/output shape: [batch_size, Data_dim(D), Seg_num(L), d_model]

    Stage 1 applies MSA over time segments within each series dimension;
    stage 2 exchanges information across series dimensions through a small
    set of learnable router vectors (sender/receiver attention).
    '''
    def __init__(self, seg_num, factor, d_model, n_heads, d_ff = None, dropout=0.1):
        super(TwoStageAttentionLayer, self).__init__()
        d_ff = d_ff or 4 * d_model  # hidden width of the MLPs
        # Stage 1: temporal dependency within each series dimension.
        self.time_attention = AttentionLayer(d_model, n_heads, dropout = dropout)
        # Stage 2: cross-dimension dependency via router aggregation/distribution.
        self.dim_sender = AttentionLayer(d_model, n_heads, dropout = dropout)
        self.dim_receiver = AttentionLayer(d_model, n_heads, dropout = dropout)
        # Learnable routers — `factor` vectors per segment keep the
        # dimension-to-dimension interaction cost low.
        self.router = nn.Parameter(torch.randn(seg_num, factor, d_model))

        self.dropout = nn.Dropout(dropout)
        # Post-residual normalization for both stages.
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.norm4 = nn.LayerNorm(d_model)
        # Position-wise feed-forward blocks for each stage.
        self.MLP1 = nn.Sequential(nn.Linear(d_model, d_ff),
                                nn.GELU(),
                                nn.Linear(d_ff, d_model))
        self.MLP2 = nn.Sequential(nn.Linear(d_model, d_ff),
                                nn.GELU(),
                                nn.Linear(d_ff, d_model))

    def forward(self, x):
        # x: [batch, ts_d, seg_num, d_model]
        b, ts_d, seg_num, d_model = x.shape

        # ---- Cross-Time stage: MSA over segments, each dimension separately ----
        # Fold the dimension axis into the batch: [b*ts_d, seg_num, d_model].
        time_in = x.reshape(b * ts_d, seg_num, d_model)
        time_enc = self.time_attention(time_in, time_in, time_in)
        # Residual + norm + MLP + norm.
        stage1 = self.norm1(time_in + self.dropout(time_enc))
        stage1 = self.norm2(stage1 + self.dropout(self.MLP1(stage1)))

        # ---- Cross-Dimension stage: routers aggregate then redistribute ----
        # Regroup so each (batch, segment) attends across dimensions:
        # [b*ts_d, seg_num, d] -> [b*seg_num, ts_d, d].
        dim_send = (stage1.reshape(b, ts_d, seg_num, d_model)
                          .permute(0, 2, 1, 3)
                          .reshape(b * seg_num, ts_d, d_model))
        # Tile the per-segment routers across the batch.
        batch_router = self.router.repeat(b, 1, 1)  # [b*seg_num, factor, d_model]
        # Routers gather messages from all dimensions...
        dim_buffer = self.dim_sender(batch_router, dim_send, dim_send)
        # ...and each dimension reads the aggregated messages back.
        dim_receive = self.dim_receiver(dim_send, dim_buffer, dim_buffer)
        # Residual + norm + MLP + norm.
        stage2 = self.norm3(dim_send + self.dropout(dim_receive))
        stage2 = self.norm4(stage2 + self.dropout(self.MLP2(stage2)))

        # Restore the original [batch, ts_d, seg_num, d_model] layout.
        final_out = stage2.reshape(b, seg_num, ts_d, d_model).permute(0, 2, 1, 3)

        return final_out
