# import matplotlib.pyplot as plt
# import numpy as np
#
# # 数据
# baselines = ['BERT', 'RRRE', 'shortname']  # 删去 GCN 和 SLCN
# datasets = ['Video_Games', 'CDs_and_Vinyl', 'Digital_Music']
#
# # NDCG 数据（删去 GCN 和 SLCN）
# ndcg_data = {
#     'Video_Games': [0.6687, 0.8101, 0.8654],
#     'CDs_and_Vinyl': [0.7926, 0.8734, 0.9002],
#     'Digital_Music': [0.7815, 0.8415, 0.8926]
# }
#
# # MRR 数据（删去 GCN 和 SLCN）
# mrr_data = {
#     'Video_Games': [0.1041, 0.1587, 0.1702],
#     'CDs_and_Vinyl': [0.1204, 0.1469, 0.1619],
#     'Digital_Music': [0.1197, 0.1341, 0.1522]
# }
#
# # 使用指定的颜色
# colors = ['#7E99F4', '#CC7C71', '#7AB656']
#
# # 绘制NDCG柱状图
# x = np.arange(len(baselines))  # x轴位置
# width = 0.2  # 柱状图宽度
#
# fig, ax = plt.subplots(figsize=(8, 5))  # 调整画布大小
#
# for i, dataset in enumerate(datasets):
#     ax.bar(x + i * width, ndcg_data[dataset], width, label=dataset, color=colors[i])
#
# # 设置纵坐标范围（从最小值减去0.05开始，避免从0开始）
# ax.set_ylim(min(min(v) for v in ndcg_data.values()) - 0.05, max(max(v) for v in ndcg_data.values()) + 0.05)
#
# ax.set_xlabel(' ', fontsize=12)
# ax.set_ylabel('NDCG', fontsize=12)
# ax.set_title('NDCG Performance on Different Datasets', fontsize=14)
# ax.set_xticks(x + width)
# ax.set_xticklabels(baselines, fontsize=11)
# ax.legend( fontsize=10)  # 设置图例
# ax.grid(axis='y', linestyle='--', alpha=0.7)
#
# # 添加合并后的图片说明
# # fig.suptitle("Comparison of NDCG and MRR performance across three datasets (Video_Games, CDs_and_Vinyl, and Digital_Music) for different baselines.",
#              # y=0.02, fontsize=12, verticalalignment='bottom')
# #
# plt.tight_layout()
# plt.show()
#
# # 绘制MRR柱状图
# fig, ax = plt.subplots(figsize=(8, 5))  # 调整画布大小
#
# for i, dataset in enumerate(datasets):
#     ax.bar(x + i * width, mrr_data[dataset], width, label=dataset, color=colors[i])
#
# # 设置纵坐标范围（从最小值减去0.01开始，避免从0开始）
# ax.set_ylim(min(min(v) for v in mrr_data.values()) - 0.01, max(max(v) for v in mrr_data.values()) + 0.01)
#
# ax.set_xlabel(' ', fontsize=12)
# ax.set_ylabel('MRR', fontsize=12)
# ax.set_title('MRR Performance on Different Datasets', fontsize=14)
# ax.set_xticks(x + width)
# ax.set_xticklabels(baselines, fontsize=11)
# ax.legend(fontsize=10)  # 设置图例
# ax.grid(axis='y', linestyle='--', alpha=0.7)
#
# # 添加合并后的图片说明
# # fig.suptitle("Comparison of NDCG and MRR performance across three datasets (Video_Games, CDs_and_Vinyl, and Digital_Music) for different baselines.",
# #              y=0.02, fontsize=12, verticalalignment='bottom')
#
# plt.tight_layout()
# plt.show()


#
# import scipy.io as sio
# import pickle
# import dgl
#
#
# # 加载 YelpChi 数据
# yelp_data = sio.loadmat('data/YelpChi.mat')
#
# # 查看数据的键
# print("YelpChi 数据的键:", yelp_data.keys())
#
# # 查看每个键对应的数据形状和类型
# for key in yelp_data.keys():
#     if not key.startswith('__'):
#         print(f"Key: {key}, Shape: {yelp_data[key].shape}, Type: {type(yelp_data[key])}")
#
# # 加载 Amazon 数据
# amazon_data = sio.loadmat('data/Amazon.mat')
#
# # 查看数据的键
# print("\nAmazon 数据的键:", amazon_data.keys())
#
# # 查看每个键对应的数据形状和类型
# for key in amazon_data.keys():
#     if not key.startswith('__'):
#         print(f"Key: {key}, Shape: {amazon_data[key].shape}, Type: {type(amazon_data[key])}")
#
#
# # 加载 YelpChi 的 homo 邻接列表
# with open('data/yelp_homo_adjlists.pickle', 'rb') as f:
#     yelp_homo_adjlist = pickle.load(f)
#
# # 查看邻接列表的结构
# print("\nYelpChi homo 邻接列表的结构:")
# print(f"节点数量: {len(yelp_homo_adjlist)}")
# print(f"节点 0 的邻居: {list(yelp_homo_adjlist[0])[:10]}")  # 打印前 10 个邻居
#
# # 加载 Amazon 的 homo 邻接列表
# with open('data/amz_homo_adjlists.pickle', 'rb') as f:
#     amz_homo_adjlist = pickle.load(f)
#
# # 查看邻接列表的结构
# print("\nAmazon homo 邻接列表的结构:")
# print(f"节点数量: {len(amz_homo_adjlist)}")
# print(f"节点 0 的邻居: {list(amz_homo_adjlist[0])[:10]}")  # 打印前 10 个邻居
#
#
#
#
# # 加载 YelpChi 图
# yelp_graph = dgl.load_graphs('data/graph-yelp.bin')[0][0]
#
# # 查看图的结构
# print("\nYelpChi 图的结构:")
# print(f"节点数量: {yelp_graph.num_nodes()}")
# print(f"边数量: {yelp_graph.num_edges()}")
# print(f"节点特征形状: {yelp_graph.ndata['feat'].shape}")
# print(f"节点标签形状: {yelp_graph.ndata['label'].shape}")
#
# # 查看节点特征和标签的示例
# print("\n节点 0 的特征:", yelp_graph.ndata['feat'][0])
# print("节点 0 的标签:", yelp_graph.ndata['label'][0])
#
# # 加载 Amazon 图
# amazon_graph = dgl.load_graphs('data/graph-amazon.bin')[0][0]
#
# # 查看图的结构
# print("\nAmazon 图的结构:")
# print(f"节点数量: {amazon_graph.num_nodes()}")
# print(f"边数量: {amazon_graph.num_edges()}")
# print(f"节点特征形状: {amazon_graph.ndata['feat'].shape}")
# print(f"节点标签形状: {amazon_graph.ndata['label'].shape}")
#
# # 查看节点特征和标签的示例
# print("\n节点 0 的特征:", amazon_graph.ndata['feat'][0])
# print("节点 0 的标签:", amazon_graph.ndata['label'][0])


# 查看数据集mat文件内容
# from scipy.io import loadmat
# import os
#
# def print_mat_contents(file_path):
#     """
#     打印MAT文件中的内容
#     :param file_path: MAT文件的路径
#     """
#     try:
#         mat_data = loadmat(file_path)
#         print(f"Contents of {file_path}:")
#         for key, value in mat_data.items():
#             if not key.startswith('__'):  # 跳过MAT文件的内部键
#                 print(f"Key: {key}, Value type: {type(value)}, Shape: {value.shape if hasattr(value, 'shape') else 'N/A'}")
#                 print(f"Value: {value}")
#                 print("-" * 50)
#     except Exception as e:
#         print(f"Error loading {file_path}: {e}")
#
# # 设置数据目录
# DATADIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/")
#
# # 查看YelpChi数据集
# yelp_file = os.path.join(DATADIR, 'YelpChi.mat')
# print_mat_contents(yelp_file)
#
# # 查看Amazon数据集
# amazon_file = os.path.join(DATADIR, 'Amazon.mat')
# print_mat_contents(amazon_file)

from scipy.io import loadmat
import os
import numpy as np

from scipy.io import loadmat
import os
import numpy as np

from scipy.io import loadmat
import os
import numpy as np


def count_edges_in_mat(file_path):
    """Count the number of edges stored in a MAT file.

    Only MATLAB sparse matrices contribute: scipy loads them as sparse
    matrices exposing an ``nnz`` attribute, and every stored nonzero
    entry of such an adjacency matrix counts as one edge.  Dense arrays
    (features, labels, ...) have no ``nnz`` and are ignored.

    :param file_path: path to the MAT file
    :return: total number of edges, or 0 if the file cannot be loaded
    """
    try:
        mat_data = loadmat(file_path)
    except Exception as e:
        # Best-effort: report the problem and fall back to 0 edges
        # instead of propagating the error to the caller.
        print(f"Error loading {file_path}: {e}")
        return 0
    # Skip MAT-file internal keys (e.g. '__header__') and sum the
    # nonzero counts of every sparse matrix in the file.
    return sum(
        value.nnz
        for key, value in mat_data.items()
        if not key.startswith('__') and hasattr(value, 'nnz')
    )


def print_mat_contents_and_edge_count(file_path):
    """Print every user-visible entry of a MAT file and its edge count.

    For each non-internal key the value's type, shape and content are
    printed; afterwards the total number of edges (nonzero entries of
    all sparse matrices in the file) is reported.  Load errors are
    printed rather than raised.

    :param file_path: path to the MAT file
    """
    try:
        mat_data = loadmat(file_path)
        print(f"Contents of {file_path}:")
        edge_count = 0
        for key, value in mat_data.items():
            if not key.startswith('__'):  # skip MAT-file internal keys
                print(
                    f"Key: {key}, Value type: {type(value)}, Shape: {value.shape if hasattr(value, 'shape') else 'N/A'}")
                print(f"Value: {value}")
                print("-" * 50)
                # Sparse matrices expose .nnz; accumulate their nonzero
                # counts here instead of re-loading the whole file via
                # count_edges_in_mat (the original loaded it twice).
                if hasattr(value, 'nnz'):
                    edge_count += value.nnz

        print(f"Total number of edges in {file_path}: {edge_count}")
    except Exception as e:
        print(f"Error loading {file_path}: {e}")


# Directory holding the .mat dataset files, resolved relative to this script.
DATADIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/")

yelp_file = os.path.join(DATADIR, 'YelpChi.mat')
amazon_file = os.path.join(DATADIR, 'Amazon.mat')

# Dump the contents and edge count of each dataset (YelpChi, then Amazon).
for _dataset_path in (yelp_file, amazon_file):
    print_mat_contents_and_edge_count(_dataset_path)





#查看dgl图文件具体内容
# import dgl
# import torch
# import os
# import numpy as np
# import pandas as pd
#
#
# def print_dgl_graph_info(file_path, num_samples=10):
#     """
#     打印DGL图文件的信息，包括节点标签和节点特征的具体内容
#     :param file_path: DGL图文件的路径
#     :param num_samples: 打印的节点样本数量
#     """
#     try:
#         # 加载图文件
#         graphs, _ = dgl.load_graphs(file_path)
#         graph = graphs[0]
#         print(f"Graph information for {file_path}:")
#         print(f"Number of nodes: {graph.num_nodes()}")
#         print(f"Number of edges: {graph.num_edges()}")
#
#         # 打印节点特征信息
#         node_feats = graph.ndata
#         print("\nNode features:")
#         for key, value in node_feats.items():
#             print(f"Feature '{key}': shape={value.shape}, dtype={value.dtype}")
#
#             # 打印样本节点的特征值
#             if value.ndim == 1:
#                 print(f"Sample values of '{key}': {value[:num_samples]}")
#             else:
#                 print(f"Sample values of '{key}':\n{value[:num_samples]}")
#
#         # 打印边特征信息（如果有）
#         if graph.edata:
#             edge_feats = graph.edata
#             print("\nEdge features:")
#             for key, value in edge_feats.items():
#                 print(f"Feature '{key}': shape={value.shape}, dtype={value.dtype}")
#
#                 # 打印样本边的特征值
#                 if value.ndim == 1:
#                     print(f"Sample values of '{key}': {value[:num_samples]}")
#                 else:
#                     print(f"Sample values of '{key}':\n{value[:num_samples]}")
#         else:
#             print("\nNo edge features found.")
#
#         # 打印其他图信息
#         print("\nOther graph information:")
#         print(f"Is homogeneous: {graph.is_homogeneous}")
#         print(f"Is bipartite: {graph.is_bipartite}")
#         print(f"Is read-only: {graph.is_readonly}")
#
#         # 打印节点标签的分布
#         if 'label' in node_feats:
#             labels = node_feats['label']
#             unique_labels, counts = torch.unique(labels, return_counts=True)
#             print("\nLabel distribution:")
#             for label, count in zip(unique_labels, counts):
#                 print(f"Label {label}: {count} nodes")
#
#         # 打印节点特征的统计信息
#         if 'feat' in node_feats:
#             feats = node_feats['feat']
#             print("\nFeature statistics:")
#             print(f"Min value: {torch.min(feats)}")
#             print(f"Max value: {torch.max(feats)}")
#             print(f"Mean value: {torch.mean(feats)}")
#             print(f"Std value: {torch.std(feats)}")
#
#     except Exception as e:
#         print(f"Error loading {file_path}: {e}")
#
#
# # 设置数据目录
# DATADIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/")
#
# # 查看Yelp数据集的DGL图文件
# yelp_graph_file = os.path.join(DATADIR, "graph-yelp.bin")
# print_dgl_graph_info(yelp_graph_file)
#
# # 查看Amazon数据集的DGL图文件
# amazon_graph_file = os.path.join(DATADIR, "graph-amazon.bin")
# print_dgl_graph_info(amazon_graph_file)
#
# # # 查看S-FFSD数据集的DGL图文件
# # sffsd_graph_file = os.path.join(DATADIR, "graph-S-FFSD.bin")
# # print_dgl_graph_info(sffsd_graph_file)


# import os
# import numpy as np
# from scipy.io import loadmat
#
# def get_label_distribution(file_path, label_key='label'):
#     """
#     获取MAT文件中标签的分布情况
#     :param file_path: MAT文件的路径
#     :param label_key: 标签的键名，默认为'label'
#     :return: 标签的分布情况
#     """
#     try:
#         mat_data = loadmat(file_path)
#         labels = mat_data[label_key].flatten()
#         unique_labels, counts = np.unique(labels, return_counts=True)
#         distribution = dict(zip(unique_labels, counts))
#         return distribution
#     except Exception as e:
#         print(f"Error loading {file_path}: {e}")
#         return None
#
# # 设置数据目录
# DATADIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"data/")
#
# # 查看Yelp数据集的标签分布
# yelp_distribution = get_label_distribution(os.path.join(DATADIR, 'YelpChi.mat'))
# print(f"Yelp dataset label distribution: {yelp_distribution}")
