"""
边特征softmax操作

提供与DGL兼容的边特征softmax操作，使用PyG作为后端
"""

import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu
import sys
import os
import time

# Performance-debugging switch
DEBUG_PERFORMANCE = False

# Make the project root importable
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)

# Import the is_dgl_graph helper from the Graph module
from ops.src.graph.graph import is_dgl_graph

# Try to import torch_scatter (optional fast path for CPU/GPU)
try:
    from torch_scatter import scatter_softmax
    HAS_TORCH_SCATTER = True
except ImportError:
    HAS_TORCH_SCATTER = False

def manual_edge_softmax(src, index, dim_size):
    """
    Manual edge-feature softmax grouped by destination node (NPU-friendly).

    Performs a numerically stable softmax over all edges that share the same
    target node: for each node v, the softmax is taken over
    {src[e] : index[e] == v}.

    Args:
        src: 1-D tensor of per-edge scores.
        index: 1-D long tensor of target-node indices, same length as src.
        dim_size: total number of nodes (size of the grouping dimension).

    Returns:
        Tensor of the same shape as src with softmax applied per target node.
    """
    device = src.device
    dtype = src.dtype

    # 1. Per-node maximum for numerical stability (subtracted before exp).
    max_per_node = torch.full((dim_size,), float('-inf'), device=device, dtype=dtype)

    # NPU compatibility: scatter reductions are poorly supported on NPU, so
    # fall back to a per-node Python loop there (slow but guaranteed correct).
    # NOTE: the former unconditional debug print() was removed here — it ran
    # with flush=True on every call and dominated runtime in hot paths.
    if device.type == 'npu':
        for i in range(dim_size):
            mask = (index == i)
            if mask.any():
                max_per_node[i] = src[mask].max()
    else:
        # Other devices: prefer the modern scatter_reduce_ API.
        try:
            max_per_node.scatter_reduce_(0, index, src, reduce='amax', include_self=False)
        except (AttributeError, RuntimeError):
            # Older PyTorch: fall back to the deprecated scatter_(reduce='max').
            try:
                max_per_node.scatter_(0, index, src, reduce='max')
            except RuntimeError:
                # Last resort: per-node loop.
                for i in range(dim_size):
                    mask = (index == i)
                    if mask.any():
                        max_per_node[i] = src[mask].max()

    # 2. Shift by the per-node max and exponentiate.
    max_values = max_per_node[index]
    exp_src = torch.exp(src - max_values)

    # 3. Per-node denominator.
    sum_per_node = torch.zeros(dim_size, device=device, dtype=dtype)
    sum_per_node.scatter_add_(0, index, exp_src)

    # 4. Normalize; the clamp guards against division by zero.
    sum_values = sum_per_node[index]
    return exp_src / sum_values.clamp(min=1e-12)

def edge_softmax(graph, edge_weights):
    """
    Softmax edge weights over the incoming edges of each destination node.

    Args:
        graph: a DGL graph (dispatched to dgl.ops.edge_softmax) or a
            PyG-style Graph exposing `edge_index` and `num_nodes`.
        edge_weights: edge-weight tensor; the first dimension indexes edges.

    Returns:
        Tensor with the same shape as edge_weights, normalized so the
        weights of each destination node's incoming edges sum to 1.
    """
    if is_dgl_graph(graph):
        # DGL graph: delegate to DGL's native implementation.
        from dgl.ops import edge_softmax as dgl_edge_softmax
        return dgl_edge_softmax(graph, edge_weights)

    # PyG-style Graph: edges are grouped by their destination node.
    dst = graph.edge_index[1]
    num_nodes = graph.num_nodes
    # Some graph implementations expose num_nodes as a method.
    if callable(num_nodes):
        num_nodes = num_nodes()

    # NPU compatibility: torch_scatter is unreliable on NPU, so route those
    # tensors through the pure-PyTorch implementation.
    is_npu_device = edge_weights.device.type == 'npu'

    def safe_scatter_softmax(feature, index, dim=0):
        """Per-destination softmax of a 1-D feature, chosen per device."""
        if is_npu_device:
            # Native NPU PyTorch ops avoid host<->NPU data transfers.
            return manual_edge_softmax(feature, index, num_nodes)
        if HAS_TORCH_SCATTER:
            # CPU/GPU: torch_scatter's fused kernel is faster.
            return scatter_softmax(feature, index, dim=dim)
        return manual_edge_softmax(feature, index, num_nodes)

    if edge_weights.dim() <= 1:
        # 1-D weights: a single softmax pass suffices.
        return safe_scatter_softmax(edge_weights, dst, dim=0)

    # Multi-dimensional weights: flatten trailing dims to [num_edges, -1]
    # and apply the softmax independently to each column.
    original_shape = edge_weights.shape
    edge_weights_flat = edge_weights.reshape(edge_weights.shape[0], -1)
    columns = [
        safe_scatter_softmax(edge_weights_flat[:, i], dst, dim=0)
        for i in range(edge_weights_flat.shape[1])
    ]
    return torch.stack(columns, dim=1).reshape(original_shape)





## Legacy implementation (2025-07-22) — superseded by the version above; kept for reference.

# """
# 边特征softmax操作

# 提供与DGL兼容的边特征softmax操作，使用PyG作为后端
# """

# import torch
# import torch_npu
# from torch_npu.contrib import transfer_to_npu
# import sys
# import os

# # 添加路径
# current_dir = os.path.dirname(os.path.abspath(__file__))
# parent_dir = os.path.dirname(os.path.dirname(current_dir))
# if parent_dir not in sys.path:
#     sys.path.insert(0, parent_dir)

# # 从Graph模块导入is_dgl_graph函数
# from ops.src.graph.graph import is_dgl_graph

# # 尝试导入torch_scatter
# try:
#     from torch_scatter import scatter_softmax
#     HAS_TORCH_SCATTER = True
# except ImportError:
#     HAS_TORCH_SCATTER = False

# def manual_edge_softmax(src, index, dim_size):
#     """
#     手动实现边特征softmax
    
#     参数:
#         src: 边特征
#         index: 目标节点索引
#         dim_size: 节点总数
    
#     返回:
#         softmax后的边特征
#     """
#     # 对每个目标节点的入边进行softmax
#     # 1. 计算每个节点的最大值用于数值稳定
#     max_per_node = torch.full((dim_size,), float('-inf')).to(device=src.device, dtype=src.dtype)
    
#     # 使用scatter_reduce替代deprecated的scatter with reduce
#     try:
#         max_per_node.scatter_reduce_(0, index, src, reduce='amax', include_self=False)
#     except (AttributeError, RuntimeError):
#         # 如果scatter_reduce_不可用，使用旧方法
#         max_per_node.scatter_(0, index, src, reduce='max')
    
#     # 2. 减去最大值
#     max_values = max_per_node[index]
#     exp_src = torch.exp(src - max_values)
    
#     # 3. 计算分母
#     sum_per_node = torch.zeros((dim_size,)).to(device=src.device, dtype=src.dtype)
#     sum_per_node.scatter_add_(0, index, exp_src)
    
#     # 4. 归一化
#     return exp_src / sum_per_node[index].clamp(min=1e-12)

# def edge_softmax(graph, edge_weights):
#     """
#     对边特征进行softmax操作
    
#     参数:
#         graph: Graph对象或DGL图
#         edge_weights: 边权重张量
    
#     返回:
#         softmax后的边权重
#     """
#     # 检查图是否为DGL图
#     if is_dgl_graph(graph):
#         # DGL图
#         from dgl.ops import edge_softmax as dgl_edge_softmax
#         return dgl_edge_softmax(graph, edge_weights)
#     else:
#         # PyG风格的Graph
#         dst = graph.edge_index[1]
#         num_nodes = graph.num_nodes
        
#         # 确保num_nodes是整数
#         if hasattr(num_nodes, '__call__'):
#             num_nodes = num_nodes()
        
#         # 判断是否在NPU上
#         is_npu = (str(edge_weights.device).startswith('npu'))
        
#         # 处理不同维度的情况
#         if edge_weights.dim() <= 1:
#             # 如果edge_weights是一维的，直接应用softmax
#             if not is_npu and HAS_TORCH_SCATTER:
#                 return scatter_softmax(edge_weights, dst, dim=0)
#             else:
#                 return manual_edge_softmax(edge_weights, dst, num_nodes)
#         else:
#             # 如果edge_weights是多维的，逐维度应用softmax
#             original_shape = edge_weights.shape
#             # 展平成2D: [num_edges, -1]
#             edge_weights_flat = edge_weights.reshape(edge_weights.shape[0], -1)
#             result = []
#             for i in range(edge_weights_flat.shape[1]):
#                 feature = edge_weights_flat[:, i]
#                 if not is_npu and HAS_TORCH_SCATTER:
#                     result.append(scatter_softmax(feature, dst, dim=0))
#                 else:
#                     result.append(manual_edge_softmax(feature, dst, num_nodes))
#             # 重新组织成原始形状
#             result = torch.stack(result, dim=1)
#             return result.reshape(original_shape) 