import torch
import torch.nn as nn
import torch.nn.functional as F

from GATmodel import FullyConnectedGAT
from MLP import MLP
from transformerModel import SingleHeadSelfAttention,GroupedAttention,StackedSelfAttention


# 定义组合模型
# Combined model: interleaves grouped self-attention (across sensors) with a
# fully-connected GAT (across the sensor graph), then predicts per-sensor
# futures with one small MLP per sensor.
class CombinedModel(nn.Module):
    def __init__(self, seq_length, num_sensors, group_size=10, GAT_output_dim=20, GAT_hidden_dim=128,predict_length=10, num_attention_cycles=3):
        """
        :param seq_length: length of the input time window per sensor
        :param num_sensors: number of sensor channels (graph nodes for the GAT)
        :param group_size: group size used by GroupedAttention
        :param GAT_output_dim: kept for backward compatibility; no longer used
                               (see the LayerNorm fix below — the GAT layers are
                               built with output dim seq_length, not this value)
        :param GAT_hidden_dim: hidden dimension of each FullyConnectedGAT layer
        :param predict_length: number of future steps each MLP predicts
        :param num_attention_cycles: number of attention+GAT residual cycles
        """
        super(CombinedModel, self).__init__()
        self.seq_length = seq_length
        self.num_sensors = num_sensors
        self.group_size = group_size
        self.num_attention_cycles = num_attention_cycles
        self.predict_length = predict_length

        # One GroupedAttention and one FullyConnectedGAT per cycle.
        self.attention_layers = nn.ModuleList([
            GroupedAttention(num_sensors, group_size, num_sensors) for _ in range(num_attention_cycles)
        ])
        # NOTE: both the input and output feature dims of the GAT are seq_length,
        # so each cycle preserves the (N, num_sensors, seq_length) shape.
        self.GAT_layers = nn.ModuleList([
            FullyConnectedGAT(seq_length ,seq_length ,GAT_hidden_dim, dropout=0.6) for _ in range(num_attention_cycles)
        ])

        # One MLP per sensor: maps its seq_length history to predict_length outputs.
        self.mlps = nn.ModuleList([
            MLP(input_dim=seq_length, output_dim=predict_length, hidden_dims=[64], activation="relu", dropout=0.6)
            for _ in range(num_sensors)
        ])

        # Layer normalization applied after each residual connection.
        # BUG FIX: nn.LayerNorm normalizes over the LAST dimension of its input.
        # In forward(), x is (N, seq_length, num_sensors) when the norm runs
        # (forced by the residual add against the (N, seq_length, num_sensors)
        # input), so the normalized size must be num_sensors. The original
        # nn.LayerNorm(GAT_output_dim) raised a shape error whenever
        # num_sensors != GAT_output_dim.
        self.layer_norm = nn.LayerNorm(num_sensors)

    def forward(self, x):
        """
        Forward pass.
        :param x: input tensor of shape (N, seq_length, num_sensors)
        :return: output tensor of shape (N, predict_length, num_sensors)
        """
        N = x.size(0)

        # Initial residual is the raw input.
        residual = x  # (N, seq_length, num_sensors)

        for i in range(self.num_attention_cycles):
            # Grouped attention across sensors (shape-preserving).
            attention = self.attention_layers[i]
            x = attention(x)  # (N, seq_length, num_sensors)

            # Treat each sensor as a graph node: move sensors to the node axis
            # so the GAT sees (N, num_sensors, seq_length).
            x = x.transpose(1, 2)  # (N, num_sensors, seq_length)

            # Apply the GAT. Its output feature dim is seq_length (the layer is
            # constructed with in_dim == out_dim == seq_length above).
            GAT = self.GAT_layers[i]
            x = GAT(x)  # (N, num_sensors, seq_length)

            # Back to (N, seq_length, num_sensors) for the residual add.
            x = x.transpose(1, 2)  # (N, seq_length, num_sensors)

            # Residual connection.
            x = x + residual  # (N, seq_length, num_sensors)

            # Layer normalization over the sensor dimension.
            x = self.layer_norm(x)  # (N, seq_length, num_sensors)

            # The normalized output becomes the residual for the next cycle.
            residual = x  # (N, seq_length, num_sensors)

        # Per-sensor MLP heads: sensor i's (N, seq_length) history -> (N, predict_length).
        outputs = [self.mlps[i](x[:, :, i]) for i in range(self.num_sensors)]

        # Stack per-sensor predictions: (N, num_sensors, predict_length).
        x = torch.stack(outputs, dim=1)

        # Transpose to (N, predict_length, num_sensors) to mirror the input layout.
        x = x.transpose(1, 2)

        return x


