import networkx as nx
import random
import matplotlib.pyplot as plt
import community
import numpy as np
# import netbone as nb
import pandas as pd
# import seaborn as sns
from scipy import integrate
import scipy.linalg as sl

# Custom orange color as an RGB tuple normalized to [0, 1] for matplotlib.
corange = (229 / 255, 108 / 255, 91 / 255)


# 读取csv文件中的邻接矩阵，并根据邻接矩阵生成Graph
# Read an adjacency matrix from a CSV file and build a Graph from it
def get_csv_matrix(filename):
    """Return an undirected weighted Graph built from a CSV adjacency matrix.

    The CSV's first row is consumed as a header (header=0); the remaining
    rows form the square adjacency matrix. Non-zero entries become edges
    whose value is stored in the 'weight' attribute.
    """
    pd_matrix = pd.read_csv(filename, header=0)
    # dead `pass` after the return removed — it was unreachable
    return nx.from_numpy_array(np.array(pd_matrix))


def get_txt_edgelist(file_path):
    """Read a whitespace-separated edge list and return an undirected Graph.

    Each non-empty line must contain "node1 node2 weight". Nodes are kept
    as strings exactly as written; the weight is parsed as a float and
    stored in the 'weight' edge attribute.
    """
    G = nx.Graph()  # undirected graph (use nx.DiGraph() for a directed one)
    with open(file_path, 'r') as file:
        for line in file:
            # split() tolerates tabs and runs of spaces, where the original
            # split(' ') raised ValueError on double spaces or tab separators
            parts = line.split()
            if not parts:
                # skip blank lines (the original crashed on them)
                continue
            node1, node2, weight = parts
            # add the weighted edge to the graph
            G.add_edge(node1, node2, weight=float(weight))
    return G


def get_eigen(G):
    """Return (eigenvalues, eigenvectors) of G's Laplacian matrix.

    np.linalg.eigh is used because the Laplacian is symmetric, so the
    eigenvalues come back real and sorted ascending; eigenvectors[:, i]
    corresponds to eigenvalues[i].
    """
    laplac_matrix = nx.laplacian_matrix(G)  # sparse Laplacian of G
    laplac_matrix = laplac_matrix.toarray()  # densify for np.linalg.eigh
    eigenvalues, eigenvectors = np.linalg.eigh(laplac_matrix)
    # dead `pass` after the return removed — it was unreachable
    return eigenvalues, eigenvectors


def plot_eigvals_corrspond_eigvec(befor_filename, after_filename, number):
    """Plot row 1 vs row 2 of the Laplacian eigenvector matrices of two
    graphs (read from CSV adjacency-matrix files) for visual comparison.

    NOTE(review): np.linalg.eigh returns eigenvectors as *columns*; this
    function indexes rows (eigvec[1], eigvec[2]) — confirm that is intended.
    The *number* parameter is currently unused (kept for interface
    compatibility with existing callers).
    """
    before_G = get_csv_matrix(befor_filename)
    before_eigvals, before_eigvec = get_eigen(before_G)
    # print("Before:", before_eigvals)

    after_G = get_csv_matrix(after_filename)
    after_eigvals, after_eigvec = get_eigen(after_G)
    # print("After:", after_eigvals)

    # removed an unused local `x = range(...)` that was never plotted
    plt.plot(before_eigvec[1], before_eigvec[2], label='before_eigvec', linewidth=2.0, color='r', marker='o',
             markersize='5')
    plt.plot(after_eigvec[1], after_eigvec[2], label='after_eigvec', linewidth=2.0, color='b', marker='o',
             markersize='5')

    plt.rcParams['savefig.dpi'] = 600
    plt.show()


#   广义特征值与广义特征向量
#   Generalized eigenvalues and eigenvectors of the pencil (L_origin, L_mst)
def get_generalized_eigenvalue(origin_G, mst_G):
    """Solve L_origin @ v = lambda * L_mst @ v and return sorted eigenvalues.

    Both Laplacians are densified and passed to scipy.linalg.eig, which
    solves the generalized eigenvalue problem when given two matrices.
    Returns the (complex) eigenvalue array sorted in place by numpy's
    complex ordering (real part first, then imaginary part).

    Side effect: when mst_G has exactly 36 edges, both Laplacians are
    dumped to origin_mat.csv / mst_mat.csv for offline inspection.
    """
    # compute the Laplacian matrices and convert to dense arrays
    laplac_matrix_origin = nx.laplacian_matrix(origin_G)
    laplac_matrix_origin = laplac_matrix_origin.toarray()
    laplac_matrix_mst = nx.laplacian_matrix(mst_G)
    laplac_matrix_mst = laplac_matrix_mst.toarray()
    # BUG FIX: the original tested the module-level global `mst` here, which
    # breaks (NameError / wrong graph) for any caller other than this script;
    # test the mst_G parameter instead.
    if mst_G.number_of_edges() == 36:
        np.savetxt("origin_mat.csv", laplac_matrix_origin, delimiter=",")
        np.savetxt("mst_mat.csv", laplac_matrix_mst, delimiter=",")
    # solve the generalized eigenproblem: e holds eigenvalues, vector holds
    # the corresponding eigenvectors
    [e, vector] = sl.eig(laplac_matrix_origin, laplac_matrix_mst)
    e.sort()
    return e


# NULL model计算
# NULL-model computation (disparity filter, Serrano et al., PNAS 2009)
def disparity_filter(G, weight='weight'):
    """Compute the disparity-filter significance (alpha) of every edge of G.

    For each node with degree k > 1, each incident edge receives an alpha:
    the p-value of its normalized weight under a uniform null model
    (smaller alpha = more significant edge).

    Args:
        G: weighted networkx Graph or DiGraph.
        weight: edge-data key holding the weight (default 'weight').

    Returns:
        A new graph of the same directedness whose edges carry the original
        weight plus 'alpha' (undirected) or 'alpha_out'/'alpha_in'
        (directed), rounded to 4 decimals. In the undirected case, edges
        incident only to degree-1 nodes are dropped (alpha is undefined).
    """
    if nx.is_directed(G):  # directed case
        N = nx.DiGraph()
        for u in G:

            k_out = G.out_degree(u)
            k_in = G.in_degree(u)

            if k_out > 1:
                sum_w_out = sum(np.absolute(G[u][v][weight]) for v in G.successors(u))
                for v in G.successors(u):
                    w = G[u][v][weight]
                    p_ij_out = float(np.absolute(w)) / sum_w_out
                    alpha_ij_out = 1 - (k_out - 1) * integrate.quad(lambda x: (1 - x) ** (k_out - 2), 0, p_ij_out)[0]
                    N.add_edge(u, v, weight=w, alpha_out=float('%.4f' % alpha_ij_out))

            elif k_out == 1:
                # BUG FIX: G.successors() returns an iterator in networkx >= 2,
                # so the original G.successors(u)[0] raised a TypeError.
                v = next(iter(G.successors(u)))
                if G.in_degree(v) == 1:
                    # we need to keep the connection as it is the only way to
                    # maintain the connectivity of the network
                    w = G[u][v][weight]
                    N.add_edge(u, v, weight=w, alpha_out=0., alpha_in=0.)
                    # there is no need to do the same for the k_in, since the
                    # link is built already from the tail

            if k_in > 1:
                sum_w_in = sum(np.absolute(G[v][u][weight]) for v in G.predecessors(u))
                for v in G.predecessors(u):
                    w = G[v][u][weight]
                    p_ij_in = float(np.absolute(w)) / sum_w_in
                    alpha_ij_in = 1 - (k_in - 1) * integrate.quad(lambda x: (1 - x) ** (k_in - 2), 0, p_ij_in)[0]
                    N.add_edge(v, u, weight=w, alpha_in=float('%.4f' % alpha_ij_in))
        return N

    else:  # undirected case
        B = nx.Graph()
        for u in G:
            k = len(G[u])
            if k > 1:
                sum_w = sum(np.absolute(G[u][v][weight]) for v in G[u])
                for v in G[u]:
                    w = G[u][v][weight]
                    p_ij = float(np.absolute(w)) / sum_w
                    alpha_ij = 1 - (k - 1) * integrate.quad(lambda x: (1 - x) ** (k - 2), 0, p_ij)[0]
                    B.add_edge(u, v, weight=w, alpha=float('%.4f' % alpha_ij))
        return B


# 四分数计算
# Quartile / IQR computation
def get_iqr_data(datas):
    """Return [q1, median, q3, iqr, lower fence, upper fence] for *datas*.

    The fences use the standard 1.5 * IQR rule for outlier detection.
    """
    first_quartile = np.quantile(datas, 0.25)
    third_quartile = np.quantile(datas, 0.75)
    spread = third_quartile - first_quartile
    return [
        first_quartile,
        np.median(datas),
        third_quartile,
        spread,
        first_quartile - 1.5 * spread,
        third_quartile + 1.5 * spread,
    ]


# --- Script: load the network, compute disparity-filter alphas, grow an MST
# --- backbone by alpha threshold, and track the generalized condition number.
G = get_csv_matrix('Shearon Harris RAB(Reactor Auxiliary Building) Systems.csv')
# alpha = 0
G = disparity_filter(G)
# G2 = nx.Graph([(u, v, d) for u, v, d in G.edges(data=True) if d['alpha'] < alpha])
# print('alpha = %s' % alpha)
print('original: nodes = %s, edges = %s' % (G.number_of_nodes(), G.number_of_edges()))
# print('backbone: nodes = %s, edges = %s' % (G2.number_of_nodes(), G2.number_of_edges()))
# print(G2.edges(data=True))

#   Quartile computation    ######
# Iterate over all edges of the graph and collect their alpha values
alpha0 = []
for u, v, d in G.edges(data=True):
    alpha_ = d['alpha']  # read the alpha attribute stored on the edge
    alpha0.append(alpha_)
    pass
# Print the quartile statistics (q1, median, q3, iqr, fences)
print(get_iqr_data(alpha0))

# Minimum spanning tree on the alpha attribute (keeps most-significant edges)
mst = nx.tree.minimum_spanning_tree(G, weight='alpha')
# mst = nx.tree.maximum_spanning_tree(G,weight='weight')
# # Initialize the weight sum to 0
total_weight = 0
# Iterate over every edge in the graph and accumulate its weight
# for u, v, w in mst.edges(data='weight'):
#     total_weight += w
# print('total_weight = %s' % total_weight)
# nx.draw(mst, with_labels=True)
# plt.show()
print(mst.number_of_edges())  # print the number of edges in the MST

x = []
y = []
old_num_edges = 0
for s in range(0, 100):
    # Add edges back into the MST whose alpha is below the current threshold
    for u, v, d in G.edges(data=True):
        if d['alpha'] < float(s)/100:  # tune this threshold to add edges into the MST (0.56)
            mst.add_edge(u, v, **d)
        #pass

    if mst.number_of_edges() == old_num_edges:
        continue
    print(mst.number_of_edges())
    old_num_edges = mst.number_of_edges()
    eigenval = get_generalized_eigenvalue(G, mst)
    # NOTE(review): when the largest eigenvalue is huge (> 100000) it is
    # skipped in favor of the second largest — presumably to discard a
    # numerical infinity from the singular Laplacian pencil; confirm cutoff.
    if eigenval[len(eigenval) - 1].real > 100000:
        print('相对条件数: k = %s' % (eigenval[len(eigenval) - 2] / eigenval[0]))
        y.append(eigenval[len(eigenval) - 2] / eigenval[0])
    else:
        print('相对条件数: k = %s' % (eigenval[len(eigenval) - 1] / eigenval[0]))
        y.append(eigenval[len(eigenval) - 1] / eigenval[0])
    x.append(mst.number_of_edges())

    if mst.number_of_edges() == 32:
        nx.write_edgelist(mst, "test-edgelist.csv", data=True)  # dump the edge list
# Create a DataFrame whose index and columns are both the node list,
# all entries initialized to 0 (meaning no edge)
# nodes = list(mst.nodes())
# adj_matrix = pd.DataFrame(0, index=nodes, columns=nodes)

# Iterate over the graph's edges
# for u, v, w in mst.edges(data=True):
#     adj_matrix.at[u, v] = w['weight']
#     if not G.is_directed():  # for an undirected graph also set the reverse edge
#         adj_matrix.at[v, u] = w['weight']
#
#     # Save the adjacency matrix to a CSV file
# adj_matrix.to_csv('adjmatrix.csv')

# Community detection with the Louvain algorithm
# partition = community.best_partition(mst)
# print(partition)


# nx.draw(mst, node_size=150,  node_color=corange, linewidths=0.7,with_labels=True, edgecolors="black", font_family='Times New Roman',
#         edge_color=[d['weight'] for _, _, d in mst.edges(data=True)], width=2, edge_cmap=plt.cm.Oranges)
# plt.show()
plt.plot(x, y)
plt.show()
# NOTE(review): savefig() after show() typically writes an empty image,
# because show() closes the current figure — consider saving before show().
plt.savefig("a.png")

# print(mst.number_of_edges())
# eigenval = get_generalized_eigenvalue(G, mst)
# print('相对条件数: k = %s' % (eigenval[len(eigenval) - 2] / eigenval[0]))

# sns.distplot(alpha0, bins=16, kde=False, hist_kws={'color': 'steelblue'},
#              label=('Louvain', '直方图'), norm_hist=True)
#
# plt.xlabel('Alpha')
# plt.ylabel('Frequence')
# # 添加标题
# plt.title('Alpha hist')
# plt.show()


# 获取图中所有节点的列表
# nodes = list(G2.nodes())

# 创建一个DataFrame，索引和列都是节点列表
# 初始值都设为0，表示没有边
# adj_matrix = pd.DataFrame(0, index=nodes, columns=nodes)

# 遍历图的边
# for u, v, w in G2.edges(data=True):
#     adj_matrix.at[u, v] = w['weight']
#     if not G.is_directed():  # 如果图是无向的，还需要设置反向的边
#         adj_matrix.at[v, u] = w['weight']
#
#     # 将邻接矩阵保存到CSV文件
# adj_matrix.to_csv('adjacency_matrix.csv')


# # 将列表转换为NumPy数组
# my_array = np.array(alpha0)
#
# # 计算均值（平均值）
# mean = np.mean(my_array)
#
# # 计算方差
# variance = np.var(my_array)
#
# # 输出结果
# print("均值:", mean)
# print("方差:", variance)

# def disparity_filter_alpha_cut(G, weight='weight', alpha_t=0.4, cut_mode='or'):
#     ''' Performs a cut of the graph previously filtered through the disparity_filter function.
#
#         Args
#         ----
#         G: Weighted NetworkX graph
#
#         weight: string (default='weight')
#             Key for edge data used as the edge weight w_ij.
#
#         alpha_t: double (default='0.4')
#             The threshold for the alpha parameter that is used to select the surviving edges.
#             It has to be a number between 0 and 1.
#
#         cut_mode: string (default='or')
#             Possible strings: 'or', 'and'.
#             It works only for directed graphs. It represents the logic operation to filter out edges
#             that do not pass the threshold value, combining the alpha_in and alpha_out attributes
#             resulting from the disparity_filter function.
#
#
#         Returns
#         -------
#         B: Weighted NetworkX graph
#             The resulting graph contains only edges that survived from the filtering with the alpha_t threshold
#
#         References
#         ---------
#         .. M. A. Serrano et al. (2009) Extracting the Multiscale backbone of complex weighted networks. PNAS, 106:16, pp. 6483-6488.
#     '''
#
#     if nx.is_directed(G):  # Directed case:
#         B = nx.DiGraph()
#         for u, v, w in G.edges(data=True):
#             try:
#                 alpha_in = w['alpha_in']
#             except KeyError:  # there is no alpha_in, so we assign 1. It will never pass the cut
#                 alpha_in = 1
#             try:
#                 alpha_out = w['alpha_out']
#             except KeyError:  # there is no alpha_out, so we assign 1. It will never pass the cut
#                 alpha_out = 1
#
#             if cut_mode == 'or':
#                 if alpha_in < alpha_t or alpha_out < alpha_t:
#                     B.add_edge(u, v, weight=w[weight])
#             elif cut_mode == 'and':
#                 if alpha_in < alpha_t and alpha_out < alpha_t:
#                     B.add_edge(u, v, weight=w[weight])
#         return B
#
#     else:
#         B = nx.Graph()  # Undirected case:
#         for u, v, w in G.edges(data=True):
#
#             try:
#                 alpha = w['alpha']
#             except KeyError:  # there is no alpha, so we assign 1. It will never pass the cut
#                 alpha = 1
#
#             if alpha < alpha_t:
#                 B.add_edge(u, v, weight=w[weight])
#         return B
