import torch
from torch.nn import Module
from nodeupdatelayer import NodeUpdateLayer
from edgeupdatelayer import EdgeUpdateLayer
from gru import Gru

class GnnBlock(Module):
    """One GNN message-passing step: refresh edge features, then node features.

    The edge update consumes the current node states; the node update then
    aggregates over the freshly updated edges.
    """

    def __init__(self, model_dim, edge_channels, time_seq):
        super().__init__()
        self.edge_update = EdgeUpdateLayer(model_dim, edge_channels)
        self.node_update = NodeUpdateLayer(edge_channels, model_dim, time_seq)

    def forward(self, x, edge_index, edge_attr, batch):
        """Run one edge-then-node update pass.

        Returns the updated (node features, edge features) pair.
        """
        new_edge_attr = self.edge_update(x, edge_index, edge_attr)
        new_x = self.node_update(x, edge_index, new_edge_attr, batch)
        return new_x, new_edge_attr


class Model(torch.nn.Module):
    """Stack of GnnBlocks followed by a GRU head.

    Predicts a scalar per batch sample for the node selected by ``target_id``,
    returned as a residual on top of ``base``.
    """

    def __init__(self, node_channels, model_dim, edge_channels, time_seq, target_id, layer=3):
        super(Model, self).__init__()
        self.embedding = torch.nn.Linear(in_features=node_channels, out_features=model_dim)
        self.net = torch.nn.ModuleList()
        for _ in range(layer):
            self.net.append(GnnBlock(model_dim, edge_channels, time_seq))
        self.predict = Gru(model_dim, model_dim, 1, 3)
        self.target_id = target_id

    def forward(self, base, x, edge_index, edge_attr):
        """Forward pass.

        Args:
            base: baseline value(s) added to the prediction; must broadcast
                against a ``[batch_size]`` tensor.
            x: node features, shape ``[batch_size, num_nodes, time_seq, features]``.
            edge_index: edge endpoints, shape ``[batch_size, 2, num_edges]``.
            edge_attr: edge features,
                shape ``[batch_size, num_edges, time_seq, edge_channels]``.

        Returns:
            Tensor of shape ``[batch_size]`` — prediction for the target node
            plus ``base``.
        """
        batch_size, num_nodes, time_seq, features = x.shape

        x = self.embedding(x)

        # Merge batch and node dims, then move time to the front:
        # [batch, nodes, time, dim] -> [time, batch*nodes, dim]
        x = x.reshape(-1, x.shape[-2], x.shape[-1]).permute(1, 0, 2).contiguous()

        # Offset each sample's node indices so edges from different samples
        # never collide after flattening. BUG FIX: the original mutated the
        # caller's edge_index in place (edge_index[i] += i*num_nodes), which
        # corrupts the input tensor on every repeated forward call; this is
        # now done out of place.
        offsets = torch.arange(batch_size, device=edge_index.device).view(-1, 1, 1) * num_nodes
        edge_index = edge_index + offsets

        # Bring source/target rows to the front, then flatten batch and edges:
        # [batch, 2, edges] -> [2, batch*edges]
        edge_index = edge_index.permute(1, 0, 2).contiguous().reshape(2, -1)

        # Likewise flatten batch and edges, moving time to the front:
        # [batch, edges, time, c] -> [time, batch*edges, c]
        edge_attr = edge_attr.reshape(-1, edge_attr.shape[-2], edge_attr.shape[-1]).permute(1, 0, 2).contiguous()

        # Graph-id per flattened node. BUG FIX: created on the input's device
        # (the original always allocated on CPU, breaking GPU execution).
        batch = torch.arange(batch_size, dtype=torch.long, device=x.device).repeat_interleave(num_nodes)

        # `block`, not `layer`: avoids shadowing the constructor's parameter name.
        for block in self.net:
            x, edge_attr = block(x, edge_index, edge_attr, batch)

        # Back to [batch*nodes, time, dim] for the recurrent head.
        x = x.permute(1, 0, 2).contiguous()
        x = self.predict(x)
        x = x.reshape(batch_size, num_nodes)
        return x[:, self.target_id] + base


if __name__ == "__main__":
    # x: node features, shape [batch_size, num_nodes, time_seq, features];
    # batch_size is the batch dimension, num_nodes the node count,
    # time_seq the time-series length, features the per-node feature count.
    x = torch.tensor([
        [
            [[4, 6, 7], [1, 2, 3], [2, 3, 4], [9, 9, 9]], 
            [[1, 2, 3], [2, 3, 4], [3, 5, 6], [4, 4, 4]], 
            [[2, 3, 4], [3, 5, 6], [4, 4, 4], [1, 2, 3]]
        ],
        [
            [[1, 2, 3], [2, 3, 4], [3, 5, 6], [4, 4, 4]], 
            [[4, 6, 7], [1, 2, 3], [2, 3, 4], [2, 3, 1]], 
            [[1, 2, 3], [2, 3, 4], [3, 5, 6], [4, 4, 4]]
        ]
    ], dtype=torch.float32)
    
    # edge_index: edges, shape [batch_size, 2, num_edges]; row 0 holds source
    # nodes, row 1 holds target nodes.
    edge_index = torch.tensor([
        [[0, 1, 2, 1, 0],[1, 2, 1, 0, 2]], 
        [[2, 1, 2, 1, 0],[1, 2, 0, 0, 2]], 
    ], dtype=torch.long)
    
    # edge_attr: edge features, shape
    # [batch_size, num_edges, time_seq, edge_in_channels].
    edge_attr = torch.tensor([
        [
            [[0.5], [0.5], [0.5], [0.5]],
            [[1.5], [1.5], [1.5], [1.5]],
            [[2.5], [2.5], [2.5], [2.5]],
            [[3.5], [3.5], [3.5], [3.5]],
            [[4], [4], [4], [4]]
        ], 
        [
            [[0.7], [0.7], [0.7], [0.7]], 
            [[1.7], [1.7], [1.7], [1.7]],
            [[2.7], [2.7], [2.7], [2.7]],
            [[3.7], [3.7], [3.7], [3.7]],
            [[9], [9], [9], [9]]
        ]
    ], dtype=torch.float32)
    
    batch_size, num_nodes, time_seq, features = x.shape

    # BUG FIX: the original omitted the required `target_id` argument to
    # Model(...) and the required `base` argument to the forward call —
    # both raised TypeError. Predict node 0 with a zero baseline.
    gnn = Model(features, 64, 1, time_seq, target_id=0)
    base = torch.zeros(batch_size)
    out = gnn(base, x, edge_index, edge_attr)

    # The model returns one value per batch sample: shape [batch_size].
    # (The original comment claimed [num_nodes, node_in_channels], which
    # does not match Model.forward's return value.)
    print("Updated Node Features:")
    print(out.shape)