"""
广义稀疏矩阵-矩阵乘法操作

提供与DGL兼容的gspmm操作，使用PyG作为后端
"""

import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu
import sys
import os

# 添加路径
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)

from ops.src.graph.graph import is_dgl_graph


def gspmm(graph, op, reduce_op, lhs_data, rhs_data=None):
    """Generalized sparse matrix-matrix multiplication (gSpMM).

    DGL-compatible message passing: for each edge (u, v), combine the
    per-edge data ``lhs_data`` with the source-node data ``rhs_data`` via
    ``op``, then aggregate the resulting messages per destination node
    via ``reduce_op``.

    Args:
        graph: PyG-style Graph (exposing ``edge_index`` and ``num_nodes()``)
            or a DGL graph (dispatched to ``dgl.ops.gspmm``).
        op: Binary operation: 'mul', 'add', 'sub' or 'div'. Any other
            value — or ``rhs_data is None`` — copies ``lhs_data`` as the
            message unchanged.
        reduce_op: Aggregation: 'sum', 'max', 'min' or 'mean'.
        lhs_data: Edge features, shape (E, ...).
        rhs_data: Node features, shape (N, ...), or None.

    Returns:
        Tensor of shape (num_nodes, ...) with per-node aggregated features.
        Nodes with no incoming edges get 0 (matches DGL's convention; the
        previous implementation left them at +/-inf for 'max'/'min').

    Raises:
        ValueError: if ``reduce_op`` is not one of the supported values.
    """
    if is_dgl_graph(graph):
        # DGL graph: delegate to DGL's native implementation.
        import dgl.ops as dgl_ops
        return dgl_ops.gspmm(graph, op, reduce_op, lhs_data, rhs_data)

    # PyG-style Graph.
    src, dst = graph.edge_index
    num_nodes = graph.num_nodes()

    if reduce_op not in ('sum', 'max', 'min', 'mean'):
        raise ValueError(f"Unsupported reduce operation: {reduce_op}")

    # Build per-edge messages.
    if rhs_data is not None and op in ('mul', 'add', 'sub', 'div'):
        gathered = rhs_data[src]
        if op == 'mul':
            msg = lhs_data * gathered
        elif op == 'add':
            msg = lhs_data + gathered
        elif op == 'sub':
            msg = lhs_data - gathered
        else:  # 'div' — small epsilon guards against division by zero
            msg = lhs_data / (gathered + 1e-6)
    else:
        # 'copy'-style: the edge data itself is the message.
        msg = lhs_data

    out_shape = (num_nodes,) + msg.shape[1:]

    if reduce_op in ('sum', 'mean'):
        result = torch.zeros(out_shape, device=msg.device, dtype=msg.dtype)
        result.index_add_(0, dst, msg)
        if reduce_op == 'mean':
            # In-degree per node, in msg's dtype (the old code hard-coded
            # float32, which could silently mix precisions).
            deg = torch.zeros(num_nodes, device=msg.device, dtype=msg.dtype)
            deg.index_add_(0, dst, torch.ones_like(dst, dtype=msg.dtype))
            # clamp keeps zero-degree rows at 0 without dividing by 0.
            deg = deg.clamp(min=1).view(-1, *([1] * (msg.dim() - 1)))
            result = result / deg
        return result

    # 'max' / 'min': vectorized scatter-reduce replaces the previous O(E)
    # Python loop. include_self=False means untouched rows keep their
    # zero init instead of +/-inf (DGL-compatible for isolated nodes).
    index = dst.view(-1, *([1] * (msg.dim() - 1))).expand_as(msg)
    result = torch.zeros(out_shape, device=msg.device, dtype=msg.dtype)
    result.scatter_reduce_(
        0, index, msg,
        reduce='amax' if reduce_op == 'max' else 'amin',
        include_self=False,
    )
    return result