from transformers import BertModel, BertTokenizer
import warnings
import torch
import networkx as nx
import os
from torch_geometric.utils import from_networkx
from pathlib import Path
from functools import partial
import torch.multiprocessing as mp

warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Path to the pretrained BERT model weights.
BERT_PATH = '/data/user5/bert'

# Option 1: pin the default CUDA device at startup.
#torch.cuda.set_device(3)  # use GPU 3

def process_burst(burst_string, bert, tokenizer, device, max_chunk_size=63):
    """Embed a whitespace-separated "burst" string with BERT.

    The token list is split into chunks of at most ``max_chunk_size`` tokens;
    each chunk is encoded, run through ``bert``, and its [CLS] vector is
    collected. The mean over all chunk vectors is returned.

    Args:
        burst_string: whitespace-separated tokens (hex bytes in this project).
        bert: BERT model already moved to ``device``.
        tokenizer: matching tokenizer returning PyTorch tensors.
        device: torch device the encoded inputs are moved to.
        max_chunk_size: maximum number of tokens per chunk.

    Returns:
        1-D float tensor of size 768 (bert-base hidden size, matching the
        caller's ``embedding_dim``) on ``device``. A zero vector is returned
        for an empty or whitespace-only ``burst_string`` (the previous
        implementation crashed with ``torch.cat`` on an empty list).
    """
    tokens = burst_string.split()
    if not tokens:
        return torch.zeros(768, device=device)

    chunk_vectors = []
    with torch.no_grad():
        for start in range(0, len(tokens), max_chunk_size):
            chunk_string = " ".join(tokens[start:start + max_chunk_size])
            encoded = tokenizer(chunk_string, padding=True, truncation=True,
                                max_length=512, return_tensors='pt')
            encoded = {k: v.to(device) for k, v in encoded.items()}
            outputs = bert(**encoded)
            # [CLS] embedding of the chunk. Under no_grad nothing keeps the
            # autograd graph alive, so the per-iteration del/empty_cache
            # churn of the original version is unnecessary (and slow).
            chunk_vectors.append(outputs.last_hidden_state[:, 0, :])

    return torch.cat(chunk_vectors, dim=0).mean(dim=0)

def _embed_params(params, bert, tokenizer, device):
    """Return the mean [CLS] embedding over the whitespace-separated tokens in *params*."""
    cls_vectors = []
    with torch.no_grad():
        for param in params.split():
            encoded = tokenizer(param, padding=True, truncation=True,
                                max_length=512, return_tensors='pt')
            encoded = {k: v.to(device) for k, v in encoded.items()}
            outputs = bert(**encoded)
            # Keep only the [CLS] vector of each param token.
            cls_vectors.append(outputs.last_hidden_state[:, 0, :])
    return torch.cat(cls_vectors, dim=0).mean(dim=0)


def get_graph_data(graph_file, output_file, bert, tokenizer, device):
    """Embed every node of a GEXF graph with BERT and save a PyG ``Data`` object.

    Nodes with a non-empty ``params`` attribute get the mean [CLS] embedding
    of their individual param tokens; otherwise, nodes with a non-empty
    ``burst`` attribute get ``process_burst``'s chunked embedding; all
    remaining nodes keep a zero vector.

    Args:
        graph_file: path to the input ``.gexf`` file.
        output_file: path the resulting ``torch.save``-d Data object goes to.
        bert: BERT model on ``device``.
        tokenizer: matching tokenizer.
        device: device used for the forward passes.
    """
    G = nx.read_gexf(graph_file)

    embedding_dim = 768  # bert-base hidden size
    node_features = torch.zeros((G.number_of_nodes(), embedding_dim),
                                dtype=torch.float32, device='cpu')

    # from_networkx collects node attributes shared by all nodes, so make
    # sure every node exposes both keys.
    for node in G.nodes():
        G.nodes[node].setdefault('params', '')
        G.nodes[node].setdefault('burst', '')

    for idx, node in enumerate(G.nodes()):
        # Attributes are guaranteed present after the normalization above;
        # the original ``.get(..., None)`` fallback was redundant.
        params = G.nodes[node]['params']
        burst = G.nodes[node]['burst']

        if params:
            # Move each result to CPU right away so GPU memory is not held
            # across the whole graph (the original accumulated CUDA tensors
            # and relied on per-token del/empty_cache churn).
            node_features[idx] = _embed_params(params, bert, tokenizer, device).cpu()
        elif burst:
            node_features[idx] = process_burst(burst, bert, tokenizer, device).cpu()

    # Convert the graph structure and attach the feature matrix.
    data = from_networkx(G)
    data.x = node_features
    torch.save(data, output_file)
    
def filter_processed_files(graph_files, output_dir):
    """Return the graph files whose ``.pt`` output does not yet exist.

    Args:
        graph_files: iterable of paths to ``.gexf`` graph files.
        output_dir: directory where processed ``.pt`` files are written.

    Returns:
        List of entries from *graph_files* still needing processing, in
        their original order.
    """
    out_dir = Path(output_dir)
    # Single pass instead of the original two-pass O(n^2)
    # list-membership scan.
    return [
        graph_file for graph_file in graph_files
        if not (out_dir / Path(graph_file).with_suffix(".pt").name).exists()
    ]

def process_files(gpu_id, graph_files, output_dir, tasks_per_gpu=2, available_gpus=None):
    """Worker entry point: process this worker's share of files on one GPU.

    Each worker takes a contiguous slice of ``graph_files`` (the last worker
    also takes the remainder) and fans it out to ``tasks_per_gpu`` child
    processes that share the same physical GPU.

    Args:
        gpu_id: 0-based worker index, supplied by ``mp.spawn``.
        graph_files: full list of graph files to split among all workers.
        output_dir: directory where ``.pt`` outputs are written.
        tasks_per_gpu: number of parallel sub-processes per GPU.
        available_gpus: physical CUDA device ids to use; defaults to [1, 2]
            to preserve the previously hard-coded behavior.

    Raises:
        ValueError: if ``gpu_id`` has no corresponding entry in
            ``available_gpus`` (the __main__ block must spawn exactly
            ``len(available_gpus)`` workers; previously a mismatch crashed
            with an opaque IndexError).
    """
    if available_gpus is None:
        available_gpus = [1, 2]
    if gpu_id >= len(available_gpus):
        raise ValueError(
            f"gpu_id {gpu_id} out of range for available GPUs {available_gpus}")

    device = torch.device(f'cuda:{available_gpus[gpu_id]}')
    print(f"GPU {available_gpus[gpu_id]} 使用的设备: {device}")

    num_gpus = len(available_gpus)
    files_per_task = len(graph_files) // (num_gpus * tasks_per_gpu)

    # Contiguous slice for this worker; the last worker absorbs the remainder.
    start_idx = gpu_id * files_per_task * tasks_per_gpu
    if gpu_id == num_gpus - 1:
        end_idx = len(graph_files)
    else:
        end_idx = start_idx + files_per_task * tasks_per_gpu
    assigned_files = graph_files[start_idx:end_idx]
    print(f"GPU {available_gpus[gpu_id]} 分配到 {len(assigned_files)} 个文件")

    # Strided split so that no file is dropped when the count is not
    # divisible by tasks_per_gpu. The old fixed-size chunking silently
    # skipped the leftover chunk, and raised (range step 0) when there
    # were fewer files than tasks.
    sub_batches = [assigned_files[t::tasks_per_gpu] for t in range(tasks_per_gpu)]

    processes = []
    for task_id, batch in enumerate(sub_batches):
        if not batch:
            continue
        p = mp.Process(target=process_batch,
                       args=(batch, available_gpus[gpu_id], output_dir, task_id))
        p.start()
        processes.append(p)

    # Wait for all sub-tasks of this worker to finish.
    for p in processes:
        p.join()

def process_batch(files, gpu_id, output_dir, task_id):
    """Load BERT once, then embed every not-yet-processed graph file in *files* on one GPU."""
    device = torch.device(f'cuda:{gpu_id}')
    tokenizer = BertTokenizer.from_pretrained(BERT_PATH)
    bert = BertModel.from_pretrained(BERT_PATH).to(device)

    out_dir = Path(output_dir)
    for graph_file in files:
        output_file = out_dir / Path(graph_file).with_suffix(".pt").name
        # Skip files whose output already exists (another task may have
        # produced it in the meantime).
        if output_file.exists():
            continue
        print(f"GPU {gpu_id} 任务 {task_id} 开始处理文件: {graph_file}")
        get_graph_data(graph_file, output_file, bert, tokenizer, device)

if __name__ == '__main__':
    # 'spawn' is required when CUDA is used together with multiprocessing.
    mp.set_start_method('spawn', force=True)

    dataset_path = Path("/data/user5/goodware")
    output_dir = Path("/data/user5/goodpt")
    graph_files = list(dataset_path.glob("*.gexf"))
    print(f"总共发现 {len(graph_files)} 个文件")

    # Skip files that already have a .pt output.
    graph_files = filter_processed_files(graph_files, output_dir)
    print(f"过滤后剩余 {len(graph_files)} 个文件需要处理")

    if not graph_files:
        print("没有需要处理的文件，退出程序")
    else:
        # Must match the GPU list used inside process_files ([1, 2]).
        # The previous value [0, 1, 2, 3] spawned four workers, and the
        # workers with gpu_id 2 and 3 crashed with an IndexError when
        # indexing process_files' hard-coded two-entry GPU list.
        available_gpus = [1, 2]
        num_gpus = len(available_gpus)
        print(f"发现 {num_gpus} 个可用 GPU")

        # One spawned worker per GPU; each worker runs 2 parallel tasks.
        mp.spawn(
            fn=partial(process_files,
                       graph_files=graph_files,
                       output_dir=output_dir,
                       tasks_per_gpu=2),
            nprocs=num_gpus,
            join=True
        )

