import warnings
warnings.filterwarnings("ignore", category=Warning)

import heapq
import logging
import os
import shutil
import tempfile
from concurrent.futures import ProcessPoolExecutor

import numpy as np
import torch
from tqdm import tqdm

logging.basicConfig(filename='processing.log', level=logging.INFO)

def process_chunk(args):
    """Sort one edge chunk by source node and spill it to a temp .npy file.

    Args:
        args: Tuple ``(chunk_idx, edge_chunk, temp_dir)`` where ``edge_chunk``
            is an (n, 2) array whose rows are (src, dst) pairs and
            ``chunk_idx`` numbers the chunk within the run.

    Returns:
        Path of the saved file holding the chunk stably sorted by column 0.

    Raises:
        Exception: re-raised after logging, so the submitting pool sees it.
    """
    # Unpack OUTSIDE the try block: the original unpacked inside it, so any
    # failure before the assignment made the except handler itself crash with
    # NameError on ``chunk_idx`` instead of logging the real error.
    chunk_idx, edge_chunk, temp_dir = args
    try:
        # Stable sort preserves the relative order of edges sharing a source.
        sorted_chunk = edge_chunk[edge_chunk[:, 0].argsort(kind='stable')]
        # Zero-padded index keeps lexicographic filename order == chunk order.
        temp_path = os.path.join(temp_dir, f"chunk_{chunk_idx:08d}.npy")
        np.save(temp_path, sorted_chunk)
        return temp_path
    except Exception as e:
        logging.error(f"分块 {chunk_idx} 处理失败: {str(e)}")
        raise

def parallel_process(data_path, output_file, chunk_size=1_000_000, workers=4):
    """External-sort a graph's edge list by source node, in parallel.

    Loads ``edge_index`` from a torch checkpoint, sorts it in fixed-size
    chunks across worker processes, then k-way merges the sorted chunk files
    into one tab-separated ``src\\tdst`` text file ordered by source node.

    Args:
        data_path: Path to a torch file whose first element maps
            'edge_index' to a (2, E) integer tensor.  # assumed layout — TODO confirm against producer
        output_file: Destination text file, one "src\\tdst" line per edge.
        chunk_size: Edges per sort chunk (memory vs. parallelism trade-off).
        workers: Upper bound on worker processes (capped at the CPU count).

    Raises:
        Exception: the first chunk failure is re-raised after cancelling the
            remaining work; temp files are removed either way.
    """
    data = torch.load(data_path)[0]
    # Transpose (2, E) -> (E, 2) so each row is a single (src, dst) edge.
    edge_index = data['edge_index'].numpy().T
    total_edges = edge_index.shape[0]
    temp_dir = tempfile.mkdtemp(prefix="graph_temp_")
    logging.info(f"临时目录创建于: {temp_dir}")

    try:
        chunks = [
            (i // chunk_size, edge_index[i:i + chunk_size], temp_dir)
            for i in range(0, total_edges, chunk_size)
        ]

        # ``os.cpu_count() or 1``: cpu_count() may return None, and
        # min(workers, None) raises TypeError.
        with ProcessPoolExecutor(max_workers=min(workers, os.cpu_count() or 1)) as executor:
            futures = [executor.submit(process_chunk, args) for args in chunks]
            chunk_paths = []
            for future in tqdm(futures, desc="分块处理"):
                try:
                    chunk_paths.append(future.result())
                except Exception as e:
                    logging.error(f"分块处理异常: {str(e)}")
                    # Cancel not-yet-started chunks; requires Python 3.9+.
                    executor.shutdown(wait=False, cancel_futures=True)
                    raise

        # Single stable k-way merge over ALL sorted chunks.  heapq.merge is
        # lazy (streams rows, never materialises the full edge list) and does
        # O(log K) work per row instead of the previous O(K) linear scan.
        # The previous code merged in independent batches of 100 files, so
        # with more than 100 chunks the output was only sorted per batch,
        # not globally — which defeats the purpose of the external sort.
        # NOTE(review): this holds one open memmap per chunk; for runs with
        # tens of thousands of chunks, check the process fd limit.
        arrays = [np.load(p, mmap_mode='r') for p in chunk_paths]
        merged = heapq.merge(*arrays, key=lambda row: row[0])

        with open(output_file, 'w') as f:
            f.writelines(
                f"{src}\t{dst}\n"
                for src, dst in tqdm(merged, desc="归并批次", total=total_edges)
            )

        # Drop memmap references before deleting their backing files
        # (required on platforms that forbid removing open-mapped files).
        del merged, arrays
    finally:
        # Remove every temporary chunk file even when a worker failed.
        shutil.rmtree(temp_dir, ignore_errors=True)

def _main():
    """Script entry point: externally sort the AmazonProducts edge list."""
    dataset = "AmazonProducts"
    parallel_process(
        data_path=f"../data/{dataset}_data.pt",
        output_file=f"./{dataset}.ungraph",
        chunk_size=2_000_000,
        workers=12,
    )


if __name__ == "__main__":
    _main()