"""
边特征聚合操作

提供与DGL兼容的边特征聚合操作，使用PyG作为后端
"""

import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu
import sys
import os

# Make the repository root (two levels above this file) importable so the
# absolute `ops.src...` import below resolves regardless of the working
# directory this module is loaded from.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)

from ops.src.graph.graph import is_dgl_graph

def copy_e_sum(graph, edge_features, use_sparse=False, use_bincount=False):
    """
    Sum edge features onto their destination nodes.

    DGL-compatible equivalent of ``update_all(fn.copy_e, fn.sum)``:
    ``out[v] = sum(edge_features[e] for every edge e whose destination is v)``.

    Args:
        graph: PyG-style Graph (with ``edge_index`` of shape (2, E) laid out
            as [src, dst]) or a DGL graph.
        edge_features: edge feature tensor of shape (E, *feature_dims).
        use_sparse (bool): aggregate via a sparse incidence-matrix multiply
            (may be faster on some backends; falls back to CPU on NPU, where
            sparse mm is unsupported).
        use_bincount (bool): aggregate via per-column ``torch.bincount``.

    Returns:
        Tensor of shape (num_nodes, *feature_dims) with the summed features.
    """
    if is_dgl_graph(graph):
        # DGL path: delegate to DGL's built-in message passing.
        import dgl.function as fn
        graph = graph.local_var()
        graph.edata['e'] = edge_features
        graph.update_all(fn.copy_e('e', 'm'), fn.sum('m', 'out'))
        return graph.ndata['out']

    num_nodes = _resolve_num_nodes(graph)
    if use_sparse:
        return _copy_e_sum_sparse(graph, edge_features, num_nodes)
    if use_bincount:
        return _copy_e_sum_bincount(graph, edge_features, num_nodes)
    return _copy_e_sum_index_add(graph, edge_features, num_nodes)


def _resolve_num_nodes(graph):
    """Return the graph's node count as a plain int (works whether
    ``num_nodes`` is an attribute or a method; infers from edges if absent)."""
    if hasattr(graph, 'num_nodes'):
        n = graph.num_nodes() if callable(graph.num_nodes) else graph.num_nodes
        return int(n)
    # No num_nodes available: infer from the largest destination index.
    return int(graph.edge_index[1].max().item() + 1)


def _copy_e_sum_sparse(graph, edge_features, num_nodes):
    """Aggregate with a sparse (num_nodes x num_edges) incidence matrix A,
    where A[dst[e], e] = 1, so ``A @ edge_features`` sums per destination."""
    original_device = edge_features.device
    # Sparse mm is not supported on NPU; run the computation on CPU there
    # and move the result back afterwards.
    device = torch.device('cpu') if original_device.type == 'npu' else original_device

    feats = edge_features.to(device)
    dst = graph.edge_index[1].to(device)
    num_edges = feats.shape[0]

    # torch.sparse.mm needs a 2-D dense operand: flatten trailing feature
    # dims, or lift 1-D features to a column vector.
    original_shape = feats.shape
    if feats.dim() > 2:
        feats = feats.reshape(num_edges, -1)
    elif feats.dim() == 1:
        feats = feats.unsqueeze(1)

    # BUGFIX: the incidence matrix columns must be edge ids (0..E-1), not
    # source node ids — the previous edge_index.flip(0) indexing summed
    # edge_features[src[e]] instead of edge_features[e].
    indices = torch.stack([dst, torch.arange(num_edges, device=device)])
    # Match the feature dtype; sparse.mm requires both operands to agree.
    values = torch.ones(num_edges, dtype=feats.dtype, device=device)
    adj = torch.sparse_coo_tensor(indices, values, (num_nodes, num_edges))
    result = torch.sparse.mm(adj, feats)

    # Restore the original feature shape.
    if len(original_shape) > 2:
        result = result.reshape([num_nodes] + list(original_shape[1:]))
    elif len(original_shape) == 1:
        result = result.squeeze(1)
    return result.to(original_device)


def _copy_e_sum_bincount(graph, edge_features, num_nodes):
    """Aggregate via ``torch.bincount``, one call per flattened feature
    column (bincount only accepts 1-D weights)."""
    dst = graph.edge_index[1]
    original_shape = edge_features.shape
    if edge_features.dim() == 1:
        # Already 1-D: a single bincount suffices.
        return torch.bincount(dst, weights=edge_features, minlength=num_nodes)

    features_2d = edge_features.view(original_shape[0], -1)
    num_features = features_2d.shape[1]
    output = torch.zeros(num_nodes, num_features,
                         dtype=edge_features.dtype, device=edge_features.device)
    for col in range(num_features):
        output[:, col] = torch.bincount(dst, weights=features_2d[:, col],
                                        minlength=num_nodes)

    if len(original_shape) > 2:
        return output.view([num_nodes] + list(original_shape[1:]))
    return output


def _copy_e_sum_index_add(graph, edge_features, num_nodes):
    """Default path: dense scatter-add via ``index_add_`` (vectorized; far
    faster than a Python loop over edges)."""
    dst = graph.edge_index[1]
    if edge_features.dim() == 1:
        out_shape = [num_nodes]
    else:
        out_shape = [num_nodes] + [int(s) for s in edge_features.shape[1:]]
    result = torch.zeros(out_shape, dtype=edge_features.dtype,
                         device=edge_features.device)
    result.index_add_(0, dst, edge_features)
    return result