"""
边特征softmax性能基准测试

这是一个更加直观的基准测试脚本，用于评估edge_softmax算子在NPU上的性能。
"""

import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu
import time
import numpy as np
import os
import sys
from tabulate import tabulate

# Make the project root importable so `edge_softmax` resolves regardless of
# the current working directory this script is launched from.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)

# Import the edge_softmax implementation under test.
from edge_softmax import edge_softmax, manual_edge_softmax, HAS_TORCH_SCATTER
# from old_es import edge_softmax, manual_edge_softmax, HAS_TORCH_SCATTER

# PyG is required to build the benchmark graphs; bail out early if missing.
try:
    import torch_geometric as pyg
    from torch_geometric.data import Data as PygGraph
    HAS_PYG = True
except ImportError:
    HAS_PYG = False
    print("警告: PyG未安装，无法进行测试")
    exit(1)

# torch_scatter is optional; implementations fall back to a manual version.
try:
    from torch_scatter import scatter_softmax
    HAS_TORCH_SCATTER_IMPORTED = True
except ImportError:
    HAS_TORCH_SCATTER_IMPORTED = False
    print("警告: torch_scatter未安装，将使用手动实现")

def create_random_graph(num_nodes, num_edges, device='npu'):
    """Build a random directed graph and its PyG wrapper.

    Endpoints are drawn uniformly at random, so self-loops and duplicate
    edges may occur.

    Args:
        num_nodes: number of nodes in the graph.
        num_edges: number of edges to sample.
        device: torch device string the tensors are created on.

    Returns:
        Tuple of (edge_index tensor of shape [2, num_edges], PyG Data graph).
    """
    # Draw source and destination node ids independently.
    endpoints = [
        torch.randint(0, num_nodes, (num_edges,), device=device)
        for _ in range(2)
    ]
    edge_index = torch.stack(endpoints)

    # Wrap in a PyG Data object placed on the same device.
    graph = PygGraph(edge_index=edge_index, num_nodes=num_nodes).to(device)

    return edge_index, graph

def _time_edge_softmax(pyg_graph, edge_weights, num_runs):
    """Warm up once, then time `edge_softmax` for `num_runs` iterations.

    Prints each run's latency and the average as a side effect.

    Returns:
        Average latency in milliseconds (float).
    """
    # Warm-up so first-call compilation/caching is excluded from the timings.
    _ = edge_softmax(pyg_graph, edge_weights)
    torch.npu.synchronize()

    times = []
    for run_idx in range(num_runs):
        start = time.time()
        _ = edge_softmax(pyg_graph, edge_weights)
        # NPU kernels launch asynchronously; synchronize before reading the clock.
        torch.npu.synchronize()
        end_time = time.time() - start
        times.append(end_time)
        print(f"    运行 {run_idx+1}/{num_runs}: {end_time*1000:.4f}ms")

    avg_time = np.mean(times) * 1000
    print(f"    【结果】平均时间: {avg_time:.4f}ms")
    return avg_time


def run_benchmark(device='npu'):
    """Run the edge-softmax benchmark suite on the given device.

    For each (num_nodes, num_edges, feature_dim) configuration, benchmarks
    both dispatch paths of `edge_softmax` — the AscendC kernel (env var
    USE_ASCENDC_EDGE_SOFTMAX=1) and the native Python implementation
    (USE_ASCENDC_EDGE_SOFTMAX=0) — and prints a summary table.

    Args:
        device: torch device string the benchmark tensors live on.
    """
    print("=" * 80)
    print(f"开始边特征softmax性能基准测试 (设备: {device})")
    print("=" * 80)

    # Benchmark configurations: (num_nodes, num_edges, feature_dim).
    configs = [
        (150, 22350, 8),
    ]

    table_data = []  # one formatted row of results per configuration
    num_runs = 10    # timed iterations per implementation

    env_key = 'USE_ASCENDC_EDGE_SOFTMAX'
    for num_nodes, num_edges, feature_dim in configs:
        print(f"\n【测试配置】: {num_nodes}节点, {num_edges}边, 特征维度{feature_dim}")

        edge_index, pyg_graph = create_random_graph(num_nodes, num_edges, device)

        # 1-D weights when feature_dim == 1, otherwise (num_edges, feature_dim).
        if feature_dim == 1:
            edge_weights = torch.rand(num_edges, device=device)
        else:
            edge_weights = torch.rand(num_edges, feature_dim, device=device)

        row = [f"{num_nodes}", f"{num_edges}", f"{feature_dim}"]

        # Save the caller's env setting as-is (None when unset) so it can be
        # restored exactly: the old code defaulted the saved value to '1',
        # which left the variable set after the benchmark even if the caller
        # never defined it.
        orig_env = os.environ.get(env_key)
        try:
            # AscendC kernel path.
            print("  【测试】AscendC npu_edge_softmax (带padding)...")
            os.environ[env_key] = '1'
            avg_time = _time_edge_softmax(pyg_graph, edge_weights, num_runs)
            row.append(f"{avg_time:.4f}")

            # NPU native Python path.
            print("  【测试】NPU 原生实现 (native Python)...")
            os.environ[env_key] = '0'
            avg_time = _time_edge_softmax(pyg_graph, edge_weights, num_runs)
            row.append(f"{avg_time:.4f}")
        finally:
            # Restore the environment even when a benchmark run raises.
            if orig_env is None:
                os.environ.pop(env_key, None)
            else:
                os.environ[env_key] = orig_env

        table_data.append(row)

    # Print the aggregated results table.
    headers = ["节点数", "边数", "特征维度", "AscendC(ms)", "NPU-native(ms)"]
    print("\n" + "=" * 80)
    print("【性能基准测试结果】:")
    print(tabulate(table_data, headers=headers, tablefmt="grid"))
    print("=" * 80)
if __name__ == "__main__":
    # Benchmark on the first NPU device by default.
    run_benchmark('npu:0')