# 




"""
边特征softmax操作

提供与DGL兼容的边特征softmax操作，使用PyG作为后端
"""

import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu
import sys
import os

# Make the package root importable regardless of where this file is run from.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)

# Import the is_dgl_graph helper from the Graph module.
from ops.src.graph.graph import is_dgl_graph

# torch_scatter is optional; record availability so edge_softmax can
# fall back to the manual implementation when it is missing.
try:
    from torch_scatter import scatter_softmax
    HAS_TORCH_SCATTER = True
except ImportError:
    HAS_TORCH_SCATTER = False

def manual_edge_softmax(src, index, dim_size):
    """
    Manually compute a softmax over edge features, grouped by target node.

    Edges sharing the same target node form one softmax group; each edge's
    result is exp(src_e - max_g) / sum_{e' in g} exp(src_e' - max_g).

    Args:
        src: 1-D tensor of per-edge features.
        index: 1-D int64 tensor of target-node indices, same length as src.
        dim_size: total number of nodes (size of the grouping dimension).

    Returns:
        Tensor with the same shape as src containing the grouped softmax.
    """
    # 1. Per-node maximum for numerical stability. Allocate directly with the
    #    target device/dtype instead of building on CPU and copying via .to().
    max_per_node = torch.full((dim_size,), float('-inf'),
                              device=src.device, dtype=src.dtype)

    # Prefer scatter_reduce_ (the non-deprecated API); fall back to the
    # legacy scatter_(reduce='max') on older torch builds.
    try:
        max_per_node.scatter_reduce_(0, index, src, reduce='amax', include_self=False)
    except (AttributeError, RuntimeError):
        max_per_node.scatter_(0, index, src, reduce='max')

    # 2. Subtract each edge's group maximum before exponentiating.
    max_values = max_per_node[index]
    exp_src = torch.exp(src - max_values)

    # 3. Per-node sum of exponentials (the softmax denominator).
    sum_per_node = torch.zeros((dim_size,), device=src.device, dtype=src.dtype)
    sum_per_node.scatter_add_(0, index, exp_src)

    # 4. Normalize; the clamp guards against division by zero for nodes
    #    that have no incoming edges.
    return exp_src / sum_per_node[index].clamp(min=1e-12)

def npu_edge_softmax_with_padding(src, index, dim_size):
    """
    Edge softmax backed by the AscendC `npu_edge_softmax` kernel, with the
    last dimension padded to a multiple of 8 on the framework side.

    Conventions:
        src: [num_edges] or [num_edges, F] tensor of edge features.
        index: [num_edges] tensor of target-node indices (same meaning as
            PyG's dst); cast to int32 on src's device before the kernel call.
        dim_size: int, total number of nodes.

    Returns:
        Softmax result with the same shape as src (any padding added here is
        stripped before returning).

    Raises:
        Whatever torch_npu.npu_edge_softmax raises when the kernel is
        unavailable or fails; edge_softmax() catches this and falls back
        to the native implementation.
    """
    # NOTE(review): the previous version had a broken/unused timing line
    # (`t_start = torch.npu.synchronize() or ...`) whose conditional-expression
    # precedence meant it never did what it appeared to; it has been removed
    # along with the dead `_t0`/`_elapsed` instrumentation.

    # Normalize shape to [E, F] so the kernel always sees a 2-D input.
    need_squeeze_last_dim = False
    if src.dim() == 1:
        src = src.unsqueeze(-1)
        need_squeeze_last_dim = True

    # The AscendC kernel requires the feature dimension F to be 8-aligned.
    feature_dim = src.size(-1)
    pad = (8 - (feature_dim % 8)) % 8

    if pad > 0:
        src_padded = torch.nn.functional.pad(src, (0, pad), mode="constant", value=0)
    else:
        src_padded = src

    # The native Ascend op is sensitive to index dtype: ensure int32 and
    # co-location with the (possibly padded) input.
    if index.dtype != torch.int32:
        index = index.to(torch.int32)
    if index.device != src_padded.device:
        index = index.to(src_padded.device)

    # Invoke the AscendC implementation.
    out_padded = torch_npu.npu_edge_softmax(src_padded, index, int(dim_size))

    # Strip padding and restore the original shape.
    if pad > 0:
        out = out_padded[..., :feature_dim]
    else:
        out = out_padded

    if need_squeeze_last_dim:
        out = out.squeeze(-1)

    return out

def npu_native_edge_softmax(src, index, dim_size):
    """
    NPU-optimized manual softmax that avoids scatter_reduce (V3).

    scatter_max is emulated with an argsort + torch.segment_reduce so the
    whole computation stays on-device (no CPU fallback on Ascend).

    Args:
        src: [num_edges] or [num_edges, F] tensor of edge features.
        index: [num_edges] tensor of target-node indices.
        dim_size: int, total number of nodes.

    Returns:
        Tensor shaped like src with a softmax applied per target node.
    """
    # 1. Sort edges by target node so each node's incoming edges form one
    #    contiguous segment, then reduce each segment to its maximum.
    perm = index.argsort()
    sorted_src = src[perm]
    sorted_index = index[perm]
    unique_indices, counts = torch.unique_consecutive(sorted_index, return_counts=True)

    # segment_reduce computes the max of each contiguous segment.
    segment_maxes = torch.segment_reduce(data=sorted_src, reduce="max", lengths=counts)

    # Scatter the segment maxima back into a full [dim_size, ...] tensor;
    # nodes with no incoming edge keep -inf (they are never gathered below).
    max_val_shape = (dim_size,) + src.shape[1:]
    max_val = torch.full(max_val_shape, float('-inf'), device=src.device, dtype=src.dtype)
    max_val[unique_indices] = segment_maxes

    # 2. Subtract the per-node maximum (numerical stability) and exponentiate.
    max_val_gathered = max_val.index_select(0, index)
    src = torch.exp(src - max_val_gathered)

    # 3. Denominator via scatter_add_ (natively supported on the NPU).
    sum_val = torch.zeros((dim_size,) + src.shape[1:], device=src.device, dtype=src.dtype)
    if src.dim() > 1:
        index_expanded = index.unsqueeze(1).expand_as(src)
        sum_val.scatter_add_(0, index_expanded, src)
    else:
        sum_val.scatter_add_(0, index, src)

    # 4. Normalize; the epsilon guards against an all-zero denominator.
    sum_val_gathered = sum_val.index_select(0, index)
    return src / (sum_val_gathered + 1e-16)


def edge_softmax(graph, edge_weights):
    """
    Apply a softmax to edge features, grouped by destination node.

    Args:
        graph: project Graph object (PyG-style, exposing edge_index and
            num_nodes) or a DGL graph.
        edge_weights: edge weight tensor, [num_edges] or [num_edges, ...].

    Returns:
        Tensor shaped like edge_weights with a softmax applied over the
        incoming edges of each destination node.
    """
    # DGL graphs delegate straight to DGL's own implementation.
    if is_dgl_graph(graph):
        from dgl.ops import edge_softmax as dgl_edge_softmax
        return dgl_edge_softmax(graph, edge_weights)

    # PyG-style Graph: group edges by destination node.
    dst = graph.edge_index[1]
    num_nodes = graph.num_nodes

    # num_nodes may be a method on some graph objects.
    if callable(num_nodes):
        num_nodes = num_nodes()

    is_npu = str(edge_weights.device).startswith('npu')
    use_ascendc = os.environ.get('USE_ASCENDC_EDGE_SOFTMAX', '1').lower() in ('1', 'true', 'yes')

    # NPU dispatch is identical for 1-D and multi-dim inputs, so handle it
    # once here (it was previously duplicated in both branches below).
    if is_npu:
        if use_ascendc:
            try:
                return npu_edge_softmax_with_padding(edge_weights, dst, num_nodes)
            except Exception as e:
                print(f"[edge_softmax] AscendC op failed, fallback to native: {e}")
                return npu_native_edge_softmax(edge_weights, dst, num_nodes)
        return npu_native_edge_softmax(edge_weights, dst, num_nodes)

    if edge_weights.dim() <= 1:
        # 1-D weights: a single grouped softmax.
        if HAS_TORCH_SCATTER:
            return scatter_softmax(edge_weights, dst, dim=0)
        return manual_edge_softmax(edge_weights, dst, num_nodes)

    # Multi-dim weights: flatten to [num_edges, -1], softmax each feature
    # column independently, then restore the original shape.
    original_shape = edge_weights.shape
    edge_weights_flat = edge_weights.reshape(edge_weights.shape[0], -1)
    result = []
    for i in range(edge_weights_flat.shape[1]):
        feature = edge_weights_flat[:, i]
        if HAS_TORCH_SCATTER:
            result.append(scatter_softmax(feature, dst, dim=0))
        else:
            result.append(manual_edge_softmax(feature, dst, num_nodes))
    return torch.stack(result, dim=1).reshape(original_shape)


