import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from cross_models.attn import FullAttention, AttentionLayer, TwoStageAttentionLayer  # attention building blocks

# 1. Decoder layer: each layer makes a prediction at its own scale
class DecoderLayer(nn.Module):
    '''
    One Crossformer decoder layer; every layer emits a prediction at its own scale.
    '''
    def __init__(self, seg_len, d_model, n_heads, d_ff=None, dropout=0.1, out_seg_num = 10, factor = 10):
        super(DecoderLayer, self).__init__()
        # Two-Stage Attention over the decoder's own segments (self-attention).
        self.self_attention = TwoStageAttentionLayer(out_seg_num, factor, d_model, n_heads, d_ff, dropout)
        # Cross attention fusing the encoder features into the decoder state.
        self.cross_attention = AttentionLayer(d_model, n_heads, dropout = dropout)
        # Post-attention / post-MLP normalisation and dropout.
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        # Position-wise feed-forward block.
        self.MLP1 = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, d_model),
        )
        # Projects each segment embedding back to seg_len time steps.
        self.linear_pred = nn.Linear(d_model, seg_len)

    def forward(self, x, cross):
        '''
        x: output of the previous decoder layer,
           shape [batch, ts_d (series dim), out_seg_num, d_model]
        cross: features from the matching encoder scale,
           shape [batch, ts_d, in_seg_num, d_model]
        Returns (dec_output, layer_predict):
           dec_output    -- [batch, ts_d, out_seg_num, d_model]
           layer_predict -- [batch, ts_d * out_seg_num, seg_len]
        '''
        batch_size = x.shape[0]

        # Step 1: self-attention at this scale.
        x = self.self_attention(x)

        # Fold batch and series dims together so attention runs per series.
        x = rearrange(x, 'b ts_d out_seg_num d_model -> (b ts_d) out_seg_num d_model')
        cross = rearrange(cross, 'b ts_d in_seg_num d_model -> (b ts_d) in_seg_num d_model')

        # Step 2: cross attention -- decoder segments (queries) attend to
        # encoder segments (keys/values); residual + norm + MLP afterwards.
        attn_out = self.cross_attention(x, cross, cross)
        x = self.norm1(x + self.dropout(attn_out))
        dec_output = self.norm2(x + self.MLP1(x))

        # Step 3: restore the [batch, ts_d, seg, d_model] layout and predict
        # seg_len time steps from every segment embedding.
        dec_output = rearrange(dec_output, '(b ts_d) seg_dec_num d_model -> b ts_d seg_dec_num d_model', b = batch_size)
        layer_predict = self.linear_pred(dec_output)
        layer_predict = rearrange(layer_predict, 'b out_d seg_num seg_len -> b (out_d seg_num) seg_len')

        return dec_output, layer_predict

# 2. Decoder: fuses the per-scale predictions into the final output
class Decoder(nn.Module):
    '''
    Crossformer decoder: the final forecast is the sum of the predictions
    produced by the decoder layer at every scale.
    '''
    def __init__(self, seg_len, d_layers, d_model, n_heads, d_ff, dropout,\
                router=False, out_seg_num = 10, factor=10):
        super(Decoder, self).__init__()
        self.router = router
        # One DecoderLayer per encoder scale.
        self.decode_layers = nn.ModuleList(
            DecoderLayer(seg_len, d_model, n_heads, d_ff, dropout, out_seg_num, factor)
            for _ in range(d_layers)
        )

    def forward(self, x, cross):
        '''
        x: initial decoder input (positional embedding),
           shape [batch, ts_d (series dim), out_seg_num, d_model]
        cross: list of encoder feature maps, one per scale
        Returns the forecast, shape [batch, pred_len, ts_d].
        '''
        ts_d = x.shape[1]
        final_predict = None  # running sum of the per-scale predictions

        # enumerate keeps the original cross[i] indexing semantics.
        for idx, layer in enumerate(self.decode_layers):
            x, layer_predict = layer(x, cross[idx])
            # Accumulate predictions across scales (HED-style deep supervision).
            final_predict = layer_predict if final_predict is None else final_predict + layer_predict

        # [batch, ts_d * seg_num, seg_len] -> [batch, seg_num * seg_len, ts_d]
        final_predict = rearrange(final_predict, 'b (out_d seg_num) seg_len -> b (seg_num seg_len) out_d', out_d = ts_d)

        return final_predict

