import os
import csv
import torch
import argparse
from tqdm import tqdm

import warnings
warnings.filterwarnings("ignore", category=Warning)

def process_edges(data, output_path, chunk_size=100000):
    """Stream edge pairs from ``data['edge_index']`` into a Neo4j-import CSV.

    Writes a header row (":START_ID", ":END_ID", ":TYPE") followed by one
    row per edge with the fixed relationship type "CONNECTED_TO". The edge
    index is sliced in chunks so peak host memory stays bounded even for
    very large graphs.

    Args:
        data: Mapping with an 'edge_index' tensor of shape (2, num_edges)
            (source row 0, target row 1) — assumed integer IDs; confirm
            against the producer of the ``.pt`` file.
        output_path: Destination CSV path; parent directories are created
            if needed.
        chunk_size: Number of edges moved to host memory per iteration.
    """
    # Only create parent dirs when there are any: os.path.dirname() of a
    # bare filename is "" and os.makedirs("") raises FileNotFoundError.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    edge_index = data['edge_index'].contiguous()
    num_edges = edge_index.size(1)

    with open(output_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow([":START_ID", ":END_ID", ":TYPE"])

        for start in tqdm(range(0, num_edges, chunk_size), desc="Processing Edges"):
            end = min(start + chunk_size, num_edges)
            # One device->host transfer per chunk; rows become (num, 2) pairs.
            chunk = edge_index[:, start:end].t().cpu().numpy()

            # Bulk write: writerows iterates at C speed instead of a
            # Python-level writerow call per edge.
            writer.writerows((row[0], row[1], "CONNECTED_TO") for row in chunk)

            # Drop the host copy before slicing the next chunk, and return
            # cached device memory to the allocator when CUDA is present.
            del chunk
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

if __name__ == "__main__":

    # Dataset name; drives the default input/output paths and the final banner.
    graph_name = "Yelp"

    parser = argparse.ArgumentParser(description='Process edge data')
    parser.add_argument('--input', type=str, default=f'../data/{graph_name}_data.pt',
                        help='Input data path')
    parser.add_argument('--output', type=str, default=f'./{graph_name}_edges.csv',
                        help='Output CSV path')
    parser.add_argument('--chunk_size', type=int, default=100000,
                        help='Number of edges per processing chunk')
    args = parser.parse_args()

    # map_location='cpu' lets checkpoints saved with CUDA tensors load on
    # CPU-only machines; process_edges moves each chunk to CPU before
    # writing, so this changes nothing downstream.
    # NOTE(review): torch.load unpickles arbitrary objects — unsafe on
    # untrusted files. torch>=2.6 defaults to weights_only=True, which may
    # reject rich graph objects; confirm against the code that saved the
    # .pt file and pass weights_only=False explicitly if needed.
    data = torch.load(args.input, map_location='cpu')[0]

    process_edges(data, args.output, args.chunk_size)

    print(f"============数据集{graph_name}的edges.csv文件生成完毕============")