import torch
import torch.nn.functional as F
from torch_geometric.nn import MessagePassing
from torch_geometric.data import Data
from torch_scatter import scatter
from seasoncnn import CnnSeason


class NodeUpdateLayer(MessagePassing):
    """Message-passing layer that concatenates each edge's attributes onto its
    source node's features, mean-aggregates the result per target node, then
    refines the aggregate with a Linear projection followed by a CnnSeason block.

    NOTE(review): the ``__main__`` demo below drives this layer with batched,
    time-major tensors (x: [batch, nodes, time, feat], edge_index:
    [batch, 2, edges]) rather than PyG's usual [2, E] / [N, F] layout —
    confirm the installed torch_geometric version accepts that convention.
    """

    def __init__(self, edge_in_channels, model_dim, time_seq, cnnlayer=3, topk=1):
        """Build the layer.

        Args:
            edge_in_channels: feature width of each edge attribute vector.
            model_dim: feature width of each node; also the output width.
            time_seq: length of the time dimension fed to CnnSeason.
            cnnlayer: number of CNN layers inside CnnSeason.
            topk: top-k parameter forwarded to CnnSeason.
        """
        super(NodeUpdateLayer, self).__init__(aggr='mean')  # "Mean" aggregation.
        # The update model's input feature size must be
        # node feature width (model_dim) + edge feature width (edge_in_channels).
        self.update_model = torch.nn.Sequential(
            torch.nn.Linear(edge_in_channels + model_dim, model_dim), 
            CnnSeason(cnnlayer, topk, model_dim, time_seq)
        )

    def forward(self, x:torch.Tensor, edge_index:torch.Tensor, edge_attr:torch.Tensor, batch:torch.Tensor):
        """Run one round of message passing and return the updated node features."""
        # propagate will, in turn, unpack the indices, compute the messages,
        # aggregate them, and update the nodes.
        output = self.propagate(edge_index, x=x, edge_attr=edge_attr, batch=batch)
        return output

    def message(self, x_j, edge_attr):
        """Concatenate each edge's attributes onto its source node's features."""
        # x_j: source-node features per edge, shaped
        # [time_seq, num_edges * batch_size, node_in_channels]; propagate gathers
        # the matching rows of x according to edge_index.
        # Mixing edge_attr into x_j fuses each edge's features with its source
        # node's features while keeping the time dimension intact.
        message_aggr = torch.cat([x_j, edge_attr], dim=-1)
        return message_aggr

    def update(self, aggr_out):
        """Refine the per-node aggregates through the Linear + CnnSeason model."""
        # scatter has grouped all messages by target node: e.g. index [1, 1]
        # means the messages at positions 0 and 1 both belong to node 1, so
        # they were pooled together.
        # aggr_out is shaped
        # [time_seq, num_nodes * batch_size, node_in_channels + edge_in_channels];
        # permute to put the node axis first for the update model, then permute back.
        aggr_out = aggr_out.permute(1, 0, 2).contiguous()
        return self.update_model(aggr_out).permute(1, 0, 2).contiguous()

    def aggregate(self, inputs, index, batch):
        # Override PyG's default aggregation: the node axis here is dim=1
        # (tensors are time-major). `batch` is accepted so propagate can match
        # it by name, but it is not used in the reduction itself.
        return scatter(inputs, index, dim=1, reduce="mean")


if __name__ == "__main__":
    # x为节点特征，尺寸为[batch_size, num_nodes, time_seq, features]
    # 其中batch表示批次大小，num_nodes表示每个节点，time_seq表示时间序列，features表示不同特征
    x = torch.tensor([
        [
            [[4, 6, 7], [1, 2, 3], [2, 3, 4], [9, 9, 9]], 
            [[1, 2, 3], [2, 3, 4], [3, 5, 6], [4, 4, 4]], 
            [[2, 3, 4], [3, 5, 6], [4, 4, 4], [1, 2, 3]]
        ],
        [
            [[1, 2, 3], [2, 3, 4], [3, 5, 6], [4, 4, 4]], 
            [[4, 6, 7], [1, 2, 3], [2, 3, 4], [2, 3, 1]], 
            [[1, 2, 3], [2, 3, 4], [3, 5, 6], [4, 4, 4]]
        ]
    ], dtype=torch.float32)
    
    # edge_index表示边，尺寸为[batch_size, 2, num_edges]，num_edges为边的个数。edge_index的第一维是源节点，第二维是目标节点
    edge_index = torch.tensor([
        [[0, 1, 2, 1, 0],[1, 2, 1, 0, 2]], 
        [[2, 1, 2, 1, 0],[1, 2, 0, 0, 2]], 
    ], dtype=torch.long)
    
    # edge_attr表示边特征，尺寸为[batch_size, num_edges, edge_in_channels]，edge_in_channels表示边特征个数
    edge_attr = torch.tensor([
        [[0.5], [1.5], [2.5], [3.5], [4]], 
        [[0.7], [1.7], [2.7], [3.7], [9]]
    ], dtype=torch.float32)
    
    batch_size, num_nodes, time_seq, features = x.shape

    batch = torch.cat([torch.full((num_nodes,), i, dtype=torch.long) for i in range(batch_size)])

    # 初始化和使用NodeUpdateLayer
    node_update_layer = NodeUpdateLayer(1, features, time_seq)
    updated_node_features = node_update_layer(x, edge_index, edge_attr, batch)

    # 最终返回值是[num_nodes，node_in_channels]
    print("Updated Node Features:")
    print(updated_node_features.shape)
