import pickle
import random as rd
import numpy as np
import scipy.sparse as sp
from scipy.io import loadmat
import copy as cp
from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score, confusion_matrix
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.manifold import TSNE
import torch
import os
from sklearn.metrics import confusion_matrix
from torch_geometric.utils import to_dense_adj, subgraph
import torch.nn.functional as F
import torch.nn as nn
import time
import copy
from torch.utils.tensorboard import SummaryWriter
from sklearn.model_selection import train_test_split
import dgl
from tqdm import tqdm
from collections import defaultdict

# Filenames of pre-built adjacency-list pickles, keyed by
# "<dataset>_<relation>" (amz = Amazon, yelp = YelpChi).
filelist = {
    'amz_upu': 'amz_upu_adjlists.pickle',
    'amz_usu': 'amz_usu_adjlists.pickle',
    'amz_uvu': 'amz_uvu_adjlists.pickle',
    'yelp_rsr': 'yelp_rsr_adjlists.pickle',
    'yelp_rtr': 'yelp_rtr_adjlists.pickle',
    'yelp_rur': 'yelp_rur_adjlists.pickle'
}

# Filename prefixes of the pickled decomposed relation matrices;
# load_data appends "<i>.pkl" (i = 1..k) to each prefix.
file_matrix_prefix = {
    'amz_upu': 'amazon_upu_matrix_',
    'amz_usu': 'amazon_usu_matrix_',
    'amz_uvu': 'amazon_uvu_matrix_',
    'yelp_rsr': 'yelpnet_rsr_matrix_decompision_',
    'yelp_rtr': 'yelpnet_rtr_matrix_decompision_',
    'yelp_rur': 'yelpnet_rur_matrix_decompision_'
}

def sample_subgraph(batch_centers, dist_matrix, sample_size=20, temperature=0.1):
    """Sample a subgraph containing the center nodes and their neighbors.

    Each center draws up to ``sample_size`` distinct neighbors with probability
    proportional to ``exp(-distance / temperature)``, so nearer nodes are more
    likely.  A center is never drawn as its own neighbor.

    :param batch_centers: iterable of center node indices
    :param dist_matrix: (N, N) pairwise distance matrix; a torch tensor
        (possibly on GPU) or a numpy array
    :param sample_size: maximum number of neighbors sampled per center
    :param temperature: softmax temperature; smaller favors nearer nodes
    :return: list of unique node indices (centers plus sampled neighbors)
    """
    sub_nodes = set(batch_centers)
    for center in batch_centers:
        row = dist_matrix[center]
        # Accept both torch tensors and numpy arrays.
        dist_row = row.cpu().numpy() if torch.is_tensor(row) else np.asarray(row)
        probs = np.exp(-dist_row / temperature)
        probs[center] = 0  # exclude the center from its own neighbor draw
        total = probs.sum()
        if total == 0:
            # No neighbor has non-zero probability (e.g. all distances are
            # huge); normalizing would produce NaNs, so skip this center.
            continue
        probs = probs / total
        # Cap the draw so replace=False cannot request more distinct nodes
        # than have non-zero probability (np.random.choice raises otherwise).
        n_draw = min(sample_size, int(np.count_nonzero(probs)))
        neighbors = np.random.choice(len(dist_row), size=n_draw, p=probs, replace=False)
        sub_nodes.update(neighbors.tolist())
    return list(sub_nodes)

def iterate_batches(indices, batch_size, shuffle=True):
    """Yield successive batches of at most ``batch_size`` indices.

    :param indices: sequence of indices to batch
    :param batch_size: maximum batch length (the last batch may be shorter)
    :param shuffle: randomize order before batching (uses the ``random`` module)

    Unlike a naive implementation, this does NOT mutate the caller's list:
    shuffling happens on an internal copy.
    """
    order = list(indices)
    if shuffle:
        rd.shuffle(order)
    for start in range(0, len(order), batch_size):
        yield order[start:start + batch_size]

def calculate_g_mean(y_true, y_pred):
    """Return the binary G-mean: sqrt(sensitivity * specificity).

    NOTE(review): this definition is shadowed by the multiclass
    ``calculate_g_mean`` redefined later in this module; kept for reference.

    :param y_true: ground-truth binary labels (0/1)
    :param y_pred: predicted binary labels (0/1)
    """
    # Pin labels=[0, 1] so the matrix is always 2x2; without it, inputs
    # containing a single class yield a 1x1 matrix and cm[1, 1] raises.
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    TN, FP, FN, TP = cm.ravel()
    # Guard zero denominators (class absent from y_true).
    sensitivity = TP / (TP + FN) if (TP + FN) != 0 else 0.0
    specificity = TN / (TN + FP) if (TN + FP) != 0 else 0.0
    return np.sqrt(sensitivity * specificity)

def dict_to_edge_index(edge_dict):
    """Convert an adjacency-list dict {src: iterable_of_targets} into a
    (2, E) LongTensor edge index in COO layout (row 0 = sources,
    row 1 = targets)."""
    sources, targets = [], []
    for node, neighbors in edge_dict.items():
        for neighbor in neighbors:
            sources.append(node)
            targets.append(neighbor)
    return torch.LongTensor([sources, targets])

def numpy_array_to_edge_index(np_array):
    """Convert a square (N, N) adjacency matrix into a (2, E) long tensor of
    (row, col) coordinates of its non-zero entries."""
    assert np_array.ndim == 2 and np_array.shape[0] == np_array.shape[1], "Input must be a square matrix."
    coords = np.stack(np.nonzero(np_array))
    return torch.as_tensor(coords, dtype=torch.long)

def _load_pickle(path):
    """Load one pickled object from *path*.

    NOTE(review): pickle is unsafe on untrusted input; these files are
    assumed to be locally generated preprocessing artifacts.
    """
    with open(path, 'rb') as fh:
        return pickle.load(fh)


def _load_relation(adjlist_path, matrix_prefix, k):
    """Load one relation as [edge_index, [tree_edge_index_1..k]].

    :param adjlist_path: pickled adjacency-list dict for the relation
    :param matrix_prefix: path prefix of the decomposed matrices; the files
        are named "<prefix><i>.pkl" for i in 1..k
    :param k: number of decomposition levels to load
    """
    edge_index = dict_to_edge_index(_load_pickle(adjlist_path))
    trees = [
        numpy_array_to_edge_index(_load_pickle('{}{}.pkl'.format(matrix_prefix, i)))
        for i in range(1, k + 1)
    ]
    return [edge_index, trees]


def load_data(data, k=2, prefix=''):
    """Load a fraud-detection dataset with its three relation graphs.

    :param data: dataset name, 'yelp' or 'amazon'
    :param k: number of decomposed matrices to load per relation
    :param prefix: directory containing the .mat and pickle files
    :return: ([[edge_index, trees], ...] for the 3 relations, features, labels)
        or None if *data* is not a known dataset name
    """
    datasets = {
        'yelp': ('YelpChi.mat', ['yelp_rur', 'yelp_rtr', 'yelp_rsr']),
        'amazon': ('Amazon.mat', ['amz_upu', 'amz_usu', 'amz_uvu']),
    }
    if data not in datasets:
        return None
    mat_name, relation_keys = datasets[data]

    data_file = loadmat(os.path.join(prefix, mat_name))
    labels = data_file['label'].flatten()
    feat_data = data_file['features'].todense().A

    relations = [
        _load_relation(os.path.join(prefix, filelist[key]),
                       os.path.join(prefix, file_matrix_prefix[key]),
                       k)
        for key in relation_keys
    ]
    return relations, feat_data, labels


def Visualization(labels, embedding, prefix):
    """Render a 2-D t-SNE scatter plot of a class-balanced node sample and
    save it as '<prefix>/HOGRL.png'."""
    all_indices = list(range(len(labels)))
    pos, neg = pos_neg_split(all_indices, labels)
    balanced = np.array(undersample(pos, neg, scale=1))
    # Draw (with replacement) a fixed-size sample so the plot density is
    # comparable across datasets of different sizes.
    chosen = np.random.choice(balanced, size=5000, replace=True)
    points = embedding[chosen]
    point_labels = labels[chosen]

    reduced = TSNE(n_components=2, random_state=43).fit_transform(points)

    # Rescale both axes into [0, 1] for a clean, unit-square plot.
    scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(reduced)
    print(scaled.shape)

    plt.figure(figsize=(8, 8))
    plt.scatter(scaled[point_labels == 0, 0], scaled[point_labels == 0, 1],
                c='#14517C', label='Label 0', s=3)
    plt.scatter(scaled[point_labels == 1, 0], scaled[point_labels == 1, 1],
                c='#FA7F6F', label='Label 1', s=3)

    # Strip the frame and ticks so only the point cloud remains.
    axes = plt.gca()
    for side in ('top', 'right', 'left', 'bottom'):
        axes.spines[side].set_visible(False)
    plt.xticks([])
    plt.yticks([])
    plt.xlim(0, 1)
    plt.ylim(0, 1)

    plt.savefig(os.path.join(prefix, 'HOGRL.png'))
    plt.show()

def normalize(mx):
    """Row-normalize a matrix so each row sums to (approximately) 1.

    A 0.01 offset is added to every row sum so all-zero rows do not divide
    by zero; the inf guard is kept as a belt-and-braces fallback.
    """
    row_sums = np.array(mx.sum(1)) + 0.01
    inv_sums = np.power(row_sums, -1).flatten()
    inv_sums[np.isinf(inv_sums)] = 0.
    return sp.diags(inv_sums).dot(mx)

def pos_neg_split(nodes, labels):
    """Split nodes into positive and negative lists by their labels.

    Assumes ``labels[i]`` is the label of ``nodes[i]``.  Nodes beyond
    ``len(labels)`` (if any) are kept in the negative list, matching the
    original remove-from-copy behavior.

    :param nodes: sequence of node identifiers
    :param labels: sequence of 0/1 labels aligned with *nodes*
    :return: (pos_nodes, neg_nodes), both preserving original order
    """
    # Single O(n) pass instead of deepcopy + list.remove per positive
    # (which was O(n^2) and matched by value rather than position).
    pos_indices = {idx for idx, label in enumerate(labels) if label == 1}
    pos_nodes = [node for idx, node in enumerate(nodes) if idx in pos_indices]
    neg_nodes = [node for idx, node in enumerate(nodes) if idx not in pos_indices]

    return pos_nodes, neg_nodes

def undersample(pos_nodes, neg_nodes, scale=1):
    """Balance classes by drawing ``len(pos_nodes) * scale`` negatives
    (without replacement) and appending them to the positives.

    :param pos_nodes: list of positive node ids (kept in full)
    :param neg_nodes: list of negative node ids to subsample from
    :param scale: negatives-to-positives ratio
    :return: new list ``pos_nodes + sampled_negatives``
    """
    n_draw = int(len(pos_nodes) * scale)
    sampled_negatives = rd.sample(cp.deepcopy(neg_nodes), k=n_draw)
    return pos_nodes + sampled_negatives

def calculate_g_mean(y_true, y_pred):
    """Return the multiclass G-mean: the geometric mean of per-class recalls.

    (This redefinition shadows the earlier binary version in this module.)

    :param y_true: ground-truth class labels
    :param y_pred: predicted class labels
    """
    cm = confusion_matrix(y_true, y_pred)
    recalls = []
    for class_idx, row in enumerate(cm):
        support = row.sum()
        # Recall for this class; 0 when the class never occurs in y_true.
        recalls.append(row[class_idx] / support if support != 0 else 0)
    return np.prod(recalls) ** (1 / len(recalls))


def test(idx_eval, y_eval, model, feat_data, edge_indexs, device):
    """Evaluate the model on a held-out index set.

    :param idx_eval: node indices to evaluate on
    :param y_eval: ground-truth labels aligned with idx_eval
    :param model: trained model; called as model(features, edge_indexs,
        sub_nodes=None) and expected to return (logits, _).
        NOTE(review): torch.exp is applied to the logits, which assumes the
        model outputs log-probabilities (e.g. log_softmax) — confirm against
        the model definition.
    :param feat_data: node feature tensor (moved to *device* here)
    :param edge_indexs: relation edge indices passed through to the model
    :param device: torch device for the forward pass
    :return: (auc, average_precision, macro_f1, g_mean) on idx_eval
    """
    model.eval()
    with torch.no_grad():
        # Full-graph forward pass; evaluation rows are selected afterwards.
        logits, _ = model(feat_data.to(device), edge_indexs, sub_nodes=None)
        x_softmax = torch.exp(logits).cpu().detach()
        # Probability assigned to the positive (fraud) class, restricted
        # to the evaluation indices.
        positive_class_probs = x_softmax[:, 1].numpy()[np.array(idx_eval)]
        auc_score = roc_auc_score(np.array(y_eval), np.array(positive_class_probs))
        ap_score = average_precision_score(np.array(y_eval), np.array(positive_class_probs))
        # Hard labels via a fixed 0.5 threshold.
        label_prob = (np.array(positive_class_probs) >= 0.5).astype(int)
        f1_score_val = f1_score(np.array(y_eval), label_prob, average='macro')
        g_mean = calculate_g_mean(np.array(y_eval), label_prob)

    return auc_score, ap_score, f1_score_val, g_mean





