
"""
边特征softmax操作

提供与DGL兼容的边特征softmax操作，使用PyG作为后端
"""

import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu
import sys
import os

# 添加路径
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)

# 从Graph模块导入is_dgl_graph函数
from ops.src.graph.graph import is_dgl_graph

# 尝试导入torch_scatter
try:
    from torch_scatter import scatter_softmax
    HAS_TORCH_SCATTER = True
except ImportError:
    HAS_TORCH_SCATTER = False

def manual_edge_softmax(src, index, dim_size):
    """
    Manual implementation of edge-feature softmax.

    For every destination node, applies a numerically stable softmax over
    the feature values of its incoming edges.

    Args:
        src: 1-D edge feature tensor, one value per edge.
        index: 1-D destination-node index per edge (same length as src).
        dim_size: total number of nodes.

    Returns:
        Tensor of the same shape as src, softmax-normalized per node.
    """
    # 1. Per-node maximum, used for numerical stability.
    max_per_node = torch.full(
        (dim_size,), float('-inf'), device=src.device, dtype=src.dtype
    )
    try:
        # scatter_reduce_ with 'amax' (available since PyTorch 1.12).
        max_per_node.scatter_reduce_(
            0, index, src, reduce='amax', include_self=False
        )
        max_values = max_per_node[index]
    except (AttributeError, RuntimeError):
        # Fallback for older PyTorch. Note: Tensor.scatter_ only supports
        # 'add'/'multiply' reductions (not 'max'), so a per-node max is not
        # available here; subtracting the *global* max instead is
        # mathematically identical and still numerically stable.
        max_values = src.max() if src.numel() > 0 else src.new_zeros(())

    # 2. Shift by the max and exponentiate.
    exp_src = torch.exp(src - max_values)

    # 3. Per-node denominator (sum of exponentials over incoming edges).
    sum_per_node = torch.zeros((dim_size,), device=src.device, dtype=src.dtype)
    sum_per_node.scatter_add_(0, index, exp_src)

    # 4. Normalize; clamp guards against division by zero.
    return exp_src / sum_per_node[index].clamp(min=1e-12)

def edge_softmax(graph, edge_weights):
    """
    Apply a softmax over edge features, grouped by destination node.

    Dispatches to DGL's native edge_softmax for DGL graphs; otherwise
    operates on a PyG-style graph via torch_scatter when available (and not
    on NPU), falling back to the manual implementation.

    Args:
        graph: Graph object (PyG-style, with .edge_index and .num_nodes) or
            a DGL graph.
        edge_weights: edge weight tensor; the first dimension indexes edges.

    Returns:
        Tensor of the same shape as edge_weights, softmax-normalized per
        destination node.
    """
    if is_dgl_graph(graph):
        # DGL graph: delegate to DGL's own implementation.
        from dgl.ops import edge_softmax as dgl_edge_softmax
        return dgl_edge_softmax(graph, edge_weights)

    # PyG-style graph: edges are grouped by their destination node.
    dst = graph.edge_index[1]
    num_nodes = graph.num_nodes
    # num_nodes may be a method on some graph types.
    if callable(num_nodes):
        num_nodes = num_nodes()

    # torch_scatter is not used on NPU devices.
    use_scatter = HAS_TORCH_SCATTER and edge_weights.device.type != 'npu'

    def _softmax_1d(feature):
        # Per-destination softmax of a single 1-D feature column.
        if use_scatter:
            return scatter_softmax(feature, dst, dim=0)
        return manual_edge_softmax(feature, dst, num_nodes)

    if edge_weights.dim() <= 1:
        # 1-D weights: a single softmax pass suffices.
        return _softmax_1d(edge_weights)

    # Multi-dimensional weights: flatten to [num_edges, -1], apply the
    # softmax column by column, then restore the original shape.
    original_shape = edge_weights.shape
    flat = edge_weights.reshape(original_shape[0], -1)
    columns = [_softmax_1d(flat[:, i]) for i in range(flat.shape[1])]
    return torch.stack(columns, dim=1).reshape(original_shape)
