import torch
from torch import nn, einsum, broadcast_tensors
import torch.nn.functional as F
from typing import Optional, List, Union
import torch_geometric
from torch_geometric.nn import MessagePassing, global_mean_pool
from torch_geometric.typing import Adj, Size, OptTensor, Tensor
    
def exists(val):
    """Return True when *val* is not None (convenience null-check)."""
    return val is not None
    
class SiLU(nn.Module):
    """Sigmoid-weighted linear unit: silu(x) = x * sigmoid(x)."""

    def forward(self, x):
        return torch.sigmoid(x) * x
    
    
class EGNN_Sparse(MessagePassing):
    """ E(n)-equivariant message-passing layer on sparse (edge-list) graphs.

        Different from a dense EGNN since it separates the edge assignment
        from the computation (this allows for a great reduction in time and
        compute when the graph is locally or sparsely connected).
        * aggr: one of ["add", "sum", "mean", "max"]
    """
    def __init__(
        self,
        feats_dim,
        pos_dim=3,
        edge_attr_dim = 0,
        m_dim = 16,
        fourier_features = 0,
        soft_edge = 0,
        norm_feats = False,
        norm_coors = False,
        norm_coors_scale_init = 1e-2,
        update_feats = True,
        update_coors = True, 
        dropout = 0.,
        coor_weights_clamp_value = None, 
        aggr = "add",
        **kwargs
    ):
        """ Args:
            * feats_dim: dimensionality of the node features (coords excluded).
            * pos_dim: dimensionality of the coordinates (usually 3).
            * edge_attr_dim: dimensionality of extra per-edge attributes.
            * m_dim: dimensionality of the edge messages.
            * fourier_features: number of fourier frequencies budgeted into the
              edge-MLP input dim. NOTE(review): forward() never fourier-encodes
              rel_dist, so a non-zero value would make edge_mlp's input size
              mismatch at runtime — confirm before enabling.
            * soft_edge: if truthy, messages are gated by a learned sigmoid weight.
            * norm_feats: apply LayerNorm to node feats before the node MLP.
            * norm_coors / norm_coors_scale_init: currently unused (CoorsNorm
              is commented out below).
            * update_feats / update_coors: which outputs this layer updates.
            * dropout: dropout probability inside the MLPs (0 disables it).
            * coor_weights_clamp_value: if set, per-edge coordinate weights are
              clamped to [-value, value] for stability.
            * aggr: pooling method, one of {"add", "sum", "mean", "max"}.
        """
        assert aggr in {'add', 'sum', 'max', 'mean'}, 'pool method must be a valid option'
        assert update_feats or update_coors, 'you must update either features, coordinates, or both'
        kwargs.setdefault('aggr', aggr)
        super().__init__(**kwargs)
        # model params
        self.fourier_features = fourier_features
        self.feats_dim = feats_dim
        self.pos_dim = pos_dim
        self.m_dim = m_dim
        self.soft_edge = soft_edge
        self.norm_feats = norm_feats
        self.norm_coors = norm_coors
        self.update_coors = update_coors
        self.update_feats = update_feats
        # BUGFIX: was hard-coded to None, silently discarding the constructor
        # argument and disabling the clamp no matter what the caller passed.
        self.coor_weights_clamp_value = coor_weights_clamp_value

        # edge MLP input = [x_i, x_j, rel_dist(+fourier), edge_attr]
        self.edge_input_dim = (fourier_features * 2) + edge_attr_dim + 1 + (feats_dim * 2)
        self.dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()

        # EDGES
        self.edge_mlp = nn.Sequential(
            nn.Linear(self.edge_input_dim, self.edge_input_dim * 2),
            self.dropout,
            SiLU(),
            nn.Linear(self.edge_input_dim * 2, m_dim),
            SiLU()
        )

        # optional per-message soft gate in (0, 1)
        self.edge_weight = nn.Sequential(nn.Linear(m_dim, 1), 
                                         nn.Sigmoid()
        ) if soft_edge else None

        # NODES - can't do identity in node_norm bc pyg expects 2 inputs, but identity expects 1. 
        self.node_norm = torch_geometric.nn.norm.LayerNorm(feats_dim) if norm_feats else None
        # self.coors_norm = CoorsNorm(scale_init = norm_coors_scale_init) if norm_coors else nn.Identity()

        self.node_mlp = nn.Sequential(
            nn.Linear(feats_dim + m_dim, feats_dim * 2),
            self.dropout,
            SiLU(),
            nn.Linear(feats_dim * 2, feats_dim),
        ) if update_feats else None

        # COORS - maps each message to a scalar weight on the relative vector
        self.coors_mlp = nn.Sequential(
            nn.Linear(m_dim, m_dim * 2),
            self.dropout,
            SiLU(),
            nn.Linear(self.m_dim * 2, 1)
        ) if update_coors else None

        self.apply(self.init_)

    def init_(self, module):
        """Xavier-initialize all Linear layers (weights) and zero their biases."""
        if isinstance(module, nn.Linear):
            # seems to be needed to keep the network from exploding to NaN with greater depths
            nn.init.xavier_normal_(module.weight)
            nn.init.zeros_(module.bias)

    def forward(self, x: Tensor, edge_index: Adj,
                edge_attr: OptTensor = None, batch: Adj = None, 
                angle_data: List = None,  size: Size = None) -> Tensor:
        """ Inputs: 
            * x: (n_points, d) where d is pos_dims + feat_dims
            * edge_index: (2, n_edges)
            * edge_attr: tensor (n_edges, n_feats) excluding basic distance feats.
            * batch: (n_points,) long tensor. specifies cloud belonging for each point
            * angle_data: list of tensors (levels, n_edges_i, n_length_path) long tensor.
              NOTE(review): accepted but never used in this layer.
            * size: None
            Returns: (n_points, d) tensor with updated coords and features concatenated.
        """
        # split the packed input into coordinates and features
        coors, feats = x[:, :self.pos_dim], x[:, self.pos_dim:]
        
        # relative vectors and squared distances per edge
        rel_coors = coors[edge_index[0]] - coors[edge_index[1]]
        rel_dist  = (rel_coors ** 2).sum(dim=-1, keepdim=True)

        if exists(edge_attr):
            edge_attr_feats = torch.cat([edge_attr, rel_dist], dim=-1)
        else:
            edge_attr_feats = rel_dist

        hidden_out, coors_out = self.propagate(edge_index, x=feats, edge_attr=edge_attr_feats,
                                                           coors=coors, rel_coors=rel_coors, 
                                                           batch=batch)
        return torch.cat([coors_out, hidden_out], dim=-1)


    def message(self, x_i, x_j, edge_attr) -> Tensor:
        """Edge message m_ij from the two endpoint features and edge attrs."""
        m_ij = self.edge_mlp( torch.cat([x_i, x_j, edge_attr], dim=-1) )
        return m_ij

    def propagate(self, edge_index: Adj, size: Size = None, **kwargs):
        """The initial call to start propagating messages.
            Args:
            `edge_index` holds the indices of a general (sparse)
                assignment matrix of shape :obj:`[N, M]`.
            size (tuple, optional) if none, the size will be inferred
                and assumed to be quadratic.
            **kwargs: Any additional data which is needed to construct and
                aggregate messages, and to update node embeddings.
            Returns: (hidden_out, coors_out) — each with its own internal
                residual connection already applied.
        """
        size = self._check_input(edge_index, size)
        coll_dict = self._collect(self._user_args,
                                     edge_index, size, kwargs)
        msg_kwargs = self.inspector.collect_param_data('message', coll_dict)
        aggr_kwargs = self.inspector.collect_param_data('aggregate', coll_dict)
        update_kwargs = self.inspector.collect_param_data('update', coll_dict)
        
        # get messages
        m_ij = self.message(**msg_kwargs)

        # update coors if specified
        if self.update_coors:
            coor_wij = self.coors_mlp(m_ij)
            # BUGFIX: the clamp branch previously bound a local and did
            # nothing (the clamp_ call was commented out); actually clamp
            # the per-edge weights to keep coordinate updates stable.
            if self.coor_weights_clamp_value:
                clamp_value = self.coor_weights_clamp_value
                coor_wij.clamp_(min = -clamp_value, max = clamp_value)

            # normalize if needed
            # kwargs["rel_coors"] = self.coors_norm(kwargs["rel_coors"])

            # residual coordinate update: coors + aggregated weighted rel vectors
            mhat_i = self.aggregate(coor_wij * kwargs["rel_coors"], **aggr_kwargs)
            coors_out = kwargs["coors"] + mhat_i
        else:
            coors_out = kwargs["coors"]

        # update feats if specified
        if self.update_feats:
            # weight the edges if arg is passed
            if self.soft_edge:
                m_ij = m_ij * self.edge_weight(m_ij)
            m_i = self.aggregate(m_ij, **aggr_kwargs)

            # residual feature update (LayerNorm first if enabled)
            hidden_feats = self.node_norm(kwargs["x"], kwargs["batch"]) if self.node_norm else kwargs["x"]
            hidden_out = self.node_mlp( torch.cat([hidden_feats, m_i], dim = -1) )
            hidden_out = kwargs["x"] + hidden_out
        else: 
            hidden_out = kwargs["x"]

        # return tuple
        return self.update((hidden_out, coors_out), **update_kwargs)

    def __repr__(self):
        return "E(n)-GNN Layer for Graphs " + str(self.__dict__)
    
    
class ZEOEGNN_Sparse(nn.Module):
    """
    Sparse-graph EGNN model for molecular structure prediction tasks.

    Processes molecular graph data, jointly learning node features and
    spatial positions. Supports two forward modes: 'conv' returns pooled
    feature embeddings, 'task' additionally applies the prediction head.

    Attributes:
        n_layers (int): number of EGNN layers.
        node_dim (int): input node-feature dimensionality.
        pos_dim (int): coordinate dimensionality (usually 3).
        edge_attr_dim (int): edge-feature dimensionality.
        hidden_f (int): hidden dimensionality.
        device (str): compute device ('cpu' or 'cuda').
        out_node_nf (int): output dimensionality (number of classes).
    """
    def __init__(
            self,
            n_layers,
            node_dim,
            out_node_nf,
            pos_dim=3,
            edge_attr_dim=0,
            hidden_f=128,
            dropout=0.,
            aggr="add",
            device="cpu",

    ):
        super().__init__()
        self.n_layers = n_layers
        self.node_dim = node_dim
        self.pos_dim = pos_dim
        self.edge_attr_dim = edge_attr_dim
        self.hidden_f = hidden_f
        self.device = device
        self.out_node_nf = out_node_nf

        # input feature embedding
        self.embedding_in = nn.Linear(node_dim, hidden_f)

        # EGNN message-passing layers
        self.mpnn_layers = nn.ModuleList([
            EGNN_Sparse(
                feats_dim=hidden_f,
                pos_dim=pos_dim,
                edge_attr_dim=edge_attr_dim,
                m_dim=hidden_f,
                dropout=dropout,
                aggr=aggr
            )
            for _ in range(n_layers)
        ])

        # prediction head: widen then project down to the target classes
        self.embedding_out = nn.Sequential(
            nn.Linear(self.hidden_f, self.hidden_f * 2),
            nn.ReLU(),
            nn.Linear(self.hidden_f * 2, self.out_node_nf)
        )
        self.to(self.device)

    def forward(self, node_attr, pos, edge_index, batch, edge_attr, size=None, mode="conv",):
        """Run the model.

        Args:
            node_attr: (n_nodes_total, node_dim) node features.
            pos: (n_nodes_total, pos_dim) coordinates.
            edge_index: (2, n_edges) connectivity.
            batch: (n_nodes_total,) long tensor of graph membership.
            edge_attr: (n_edges, edge_attr_dim) edge features.
            size: passed through to the EGNN layers (usually None).
            mode: 'conv' for pooled embeddings, 'task' to also apply the head.
        Returns:
            (node_features, coordinates): pooled per-graph features
            (batch_size, hidden_f or out_node_nf) and per-node coordinates
            (n_nodes_total, pos_dim).
        """
        node_attr = self.embedding_in(node_attr)  # (n_nodes_total, hidden_f)

        # pack coordinates and features as the EGNN layers expect
        x = torch.cat([pos, node_attr], dim=-1)  # (n_nodes_total, pos_dim + hidden_f)

        # BUGFIX: each EGNN_Sparse layer already applies its own residual
        # connections internally (coors + update, feats + update), so use a
        # plain assignment. The previous `x += layer(...)` double-counted x,
        # making coordinates grow like 2**n_layers, and mutated an autograd
        # intermediate in place.
        for layer in self.mpnn_layers:
            x = layer(x=x, edge_index=edge_index, edge_attr=edge_attr, batch=batch, size=size)

        # split coordinates and node features back out
        coordinates, node_features = x[:, :self.pos_dim], x[:, self.pos_dim:]

        # mean-pool node features per graph: (n_nodes_total, hidden_f) -> (batch_size, hidden_f)
        node_features = global_mean_pool(node_features, batch)

        if mode == "task":
            node_features = self.embedding_out(node_features)

        return node_features, coordinates

    def __repr__(self):
        return f'EGNN_Sparse_Network_Clean of {len(self.mpnn_layers)} layers'


if __name__ == '__main__':
    # smoke-test configuration
    batch_size = 2
    n_nodes = 4
    n_feat = 4
    pos_dim = 3
    edge_attr_dim = 2
    hidden_f = 128
    classes = 15

    # toy inputs: node features and coordinates, (batch_size * n_nodes, ...)
    node_attr = torch.randn(batch_size * n_nodes, n_feat)
    pos = torch.randn(batch_size * n_nodes, pos_dim)

    # connectivity (2, num_edges): a directed ring within each of the two graphs
    edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7],
                               [1, 2, 3, 0, 5, 6, 7, 4]])

    # per-edge attributes (num_edges, edge_attr_dim)
    edge_attr = torch.randn(edge_index.size(1), edge_attr_dim)

    # graph membership for every node (batch_size * n_nodes,)
    batch = torch.repeat_interleave(torch.arange(batch_size), n_nodes)

    # build the model on CPU
    egnn = ZEOEGNN_Sparse(node_dim=n_feat, out_node_nf=classes, n_layers=1, pos_dim=pos_dim, edge_attr_dim=edge_attr_dim, hidden_f=hidden_f, device='cpu')

    # exercise both forward modes and report output shapes
    for mode in ['conv', 'task']:
        out_x, out_pos = egnn(node_attr=node_attr, pos=pos, edge_index=edge_index, batch=batch, edge_attr=edge_attr, mode=mode)

        print(f"Mode: {mode}, out_x: {out_x.size()}, out_pos: {out_pos.size()}")