import os
import torch
from Topology_simulation import topu_G
from G_embedding import Graph_embedding
from auto_label import auto_label
from auto_label.auto_mask import auto_mask


# 初始化嵌入处理器
def generator(num_batches, num_nodes, edges_per_new_node, zoom_levels, model='BA', **kwargs):
    """Generate synthetic topologies, label them with several clustering
    strategies, and save the labelled datasets for training.

    For each batch a topology JSON is produced by ``topu_G.generate``, embedded
    into a PyG graph, then labelled independently by k-means, a threshold rule,
    a GMM, and a combined strategy. Each labelling family is accumulated across
    batches and written out once at the end as a single ``.pt`` file.

    Parameters
    ----------
    num_batches : int
        Number of topologies (and therefore graphs per dataset) to generate.
    num_nodes : int
        Node count passed to the topology generator.
    edges_per_new_node : int
        Edges attached per new node (BA-style growth parameter).
    zoom_levels : int
        Zoom-level parameter forwarded to both the generator and the
        combined-labelling strategy.
    model : str, optional
        Topology model name understood by ``topu_G.generate`` (default 'BA').
    **kwargs
        Extra model-specific parameters forwarded to ``topu_G.generate``.
    """
    embedding_processor = Graph_embedding.NmapGraphProcessorJSON()

    # Output locations for training data and the intermediate topology JSONs.
    output_dir = "training_data"
    os.makedirs(output_dir, exist_ok=True)
    json_dir = "./Topology_simulation/Topu_data"
    os.makedirs(json_dir, exist_ok=True)

    # One accumulator per labelling strategy (dbscan is currently disabled).
    all_data_kmeans = []
    all_data_thr = []
    all_data_gmm = []
    all_data_combine = []

    for i in range(num_batches):
        # Build the path once, outside the try, so the except handler can
        # always reference it.  os.path.join avoids the doubled "./" prefix.
        gen_data_name = os.path.join(json_dir, f"data_{i}.json")
        try:
            # Generate the topology JSON for this batch.
            print(f"Generating topology: {gen_data_name} using model {model}")
            topu_G.generate(
                num_nodes=num_nodes,
                edges_per_new_node=edges_per_new_node,
                gen_data_name=gen_data_name,
                zoom_levels=zoom_levels,
                model=model,
                **kwargs,  # forward extra model-specific parameters
            )

            # Load the topology and convert it to a PyG Data object.
            print(f"Processing embedding for {gen_data_name}...")
            graph_data = embedding_processor.load_data(gen_data_name)

            # Label independent clones so the strategies cannot interfere.
            print(f"Generating labels for {gen_data_name}...")
            d1_kmeans = auto_label.generate_y_kmeans(graph_data.clone())
            d2_thr = auto_label.generate_y_threshold(graph_data.clone())
            d4_gmm = auto_label.generate_y_gmm(graph_data.clone())
            d5_combine = auto_label.generate_y_combined(graph_data.clone(), zoom_levels=zoom_levels)

            all_data_kmeans.append(d1_kmeans)
            all_data_thr.append(d2_thr)
            all_data_gmm.append(d4_gmm)
            all_data_combine.append(d5_combine)

        except Exception as e:
            # Best-effort: a single failed batch must not abort the run.
            print(f"Error processing {gen_data_name}: {e}")
            continue

    # Save each labelling family as one consolidated dataset.
    save_path_kmeans = os.path.join(output_dir, "train_kmeans.pt")
    save_path_thr = os.path.join(output_dir, "train_thr.pt")
    save_path_gmm = os.path.join(output_dir, "train_gmm.pt")
    save_path_combine = os.path.join(output_dir, "train_combine.pt")

    print(f"Saving all training datasets: {save_path_kmeans}, {save_path_thr}")
    embedding_processor.save_data(all_data_kmeans, save_path_kmeans)
    embedding_processor.save_data(all_data_thr, save_path_thr)
    embedding_processor.save_data(all_data_gmm, save_path_gmm)
    embedding_processor.save_data(all_data_combine, save_path_combine)
    print("All datasets generated successfully!")

    # Sanity check: reload one dataset (use the variable, not a hardcoded
    # path, so this stays correct if output_dir ever changes).
    data, slices = torch.load(save_path_kmeans, weights_only=False)
    print(data)  # PyG Data object

def mask_generator(num_batches, num_nodes, edges_per_new_node, zoom_levels, node_mask_rate=0.15, edge_mask_rate=0.25):
    """Generate paired (full graph, masked graph) datasets for graph-completion
    training.

    For each batch a complete topology is generated and embedded, then a
    masked copy is derived with ``auto_mask``.  The masked nodes/edges serve
    as labels for the downstream completion model.  The two lists are saved
    separately (not collated) so they stay easy to inspect and adjust.

    Parameters
    ----------
    num_batches : int
        Number of (full, masked) graph pairs to generate.
    num_nodes : int
        Node count passed to the topology generator.
    edges_per_new_node : int
        Edges attached per new node (BA-style growth parameter).
    zoom_levels : int or None
        Zoom-level parameter forwarded to ``topu_G.generate``.
    node_mask_rate : float, optional
        Fraction of nodes to prune in the masked copy (default 0.15).
    edge_mask_rate : float, optional
        Fraction of edges to remove in the masked copy (default 0.25).
    """
    embedding_processor = Graph_embedding.NmapGraphProcessorJSON()

    full_json_dir = "./Topology_simulation/Topu_data"
    os.makedirs(full_json_dir, exist_ok=True)
    output_dir = "./Topology_simulation/Mask_Topu_data"
    os.makedirs(output_dir, exist_ok=True)

    all_data_full = []
    all_data_masked = []

    # Generate the paired topologies batch by batch.
    for i in range(num_batches):
        # os.path.join avoids the doubled "./" prefix of the old f-string.
        gen_data_name = os.path.join(full_json_dir, f"data_{i}.json")
        print(f"\n--- Batch {i + 1}/{num_batches} ---")

        try:
            print(f"Generating full topology: {gen_data_name}")
            topu_G.generate(num_nodes=num_nodes, edges_per_new_node=edges_per_new_node, gen_data_name=gen_data_name, zoom_levels=zoom_levels, visual_enable=False)

            print(f"Processing full graph embedding for  {gen_data_name}...")
            full_graph_data = embedding_processor.load_data(gen_data_name)
            all_data_full.append(full_graph_data)

            # Derive the masked counterpart; auto_mask records what was
            # removed so it can be used as supervision later.
            print(f"Creating masked version for {gen_data_name}...")
            masked_graph_data = auto_mask(full_graph_data, node_mask_rate, edge_mask_rate)
            all_data_masked.append(masked_graph_data)

        except Exception as e:
            # Best-effort: skip the failed batch and keep generating.
            print(f"Error processing batch {i} with file {gen_data_name}: {e}")
            continue

    # Save both lists in one pass at the end.
    save_path_full = os.path.join(output_dir, "train_full.pt")
    save_path_masked = os.path.join(output_dir, "train_masked.pt")

    print(f"\nSaving all generated datasets ...")
    print(f"Full graphs will be saved to: {save_path_full}")
    print(f"Masked graphs will be saved to: {save_path_masked}")

    # Deliberately saved as plain lists (not collated) for easier tweaking.
    torch.save(all_data_full, save_path_full)
    torch.save(all_data_masked, save_path_masked)

    print("\nAll datasets generated successfully!")

    # Reload and display a sample to confirm the files are readable.
    print("\nExample of loading the saved data:")
    try:
        loaded_masked_list = torch.load(save_path_masked, weights_only=False)
        loaded_full_list = torch.load(save_path_full, weights_only=False)
        print(f"Loaded {len(loaded_masked_list)} masked graphs from file.")
        print(f"Loaded {len(loaded_full_list)} full graphs from file.")

        if loaded_masked_list:
            first_masked_graph = loaded_masked_list[0]
            first_full_graph = loaded_full_list[0]

            print("\n--- Sample1 ---")
            print("Masked Graph:")
            print(first_masked_graph)
            print("\nFull Graph (Label for LOSS 1):")
            print(first_full_graph)
            print("\nMasked Info (Labels for LOSS 2 & 3):")
            print(first_masked_graph.masked_info)

        else:
            print("\nNo graphs found in the loaded data to display.")

    except Exception as e:
        print(f"An error occurred during the loading example phase: {e}")


if __name__ == '__main__':
    # Silence third-party warnings before any generation work starts.
    import warnings
    warnings.filterwarnings("ignore")

    # Batch-generation parameters (comments kept in sync with the values).
    num_batches = 3         # number of graphs to generate
    num_nodes = 128         # nodes per graph
    edges_per_new_node = 1  # BA-style growth parameter
    zoom_levels = 2         # only consumed by generator()

    # generator(num_batches, num_nodes, edges_per_new_node, zoom_levels)
    mask_generator(
        num_batches=num_batches,
        num_nodes=num_nodes,
        edges_per_new_node=edges_per_new_node,
        zoom_levels=None,  # mask pipeline runs without zoom levels
        node_mask_rate=0.25,
        edge_mask_rate=0.35,
    )