import sys
import time

import numpy as np
import scipy.sparse as sp
import networkx as nx
import torch
import pandas as pd
from tqdm import tqdm

from config import args
import torch.nn as nn
import math
import os.path as osp
from warnings import simplefilter
import pickle
from sklearn.metrics import roc_auc_score
import sklearn.metrics as metrics

# Silence noisy FutureWarnings (pandas/sklearn emit many during training runs).
simplefilter(action='ignore', category=FutureWarning)
# NOTE(review): looks like a scratch buffer for timing checkpoints; it is never
# read or written in this file — confirm it is actually used by importers.
cur_time = []
# Config argument names recorded as the header row of the results spreadsheet
# (consumed by init_csv below).
useful_arg_name = ['dataset',
                   'learning_rate',
                   'epochs',
                   'dropout',
                   'weight_decay',
                   'infect_rate',
                   'recover_rate',
                   'generate_model',
                   'num_src',
                   'lb', 'ub',
                   'lpsi_alpha',
                   'cluster_number',
                   'gcn_layers',
                   'gcn_hidden',
                   'd_model',
                   'nhead',
                   'decoder_layers',
                   'eliminate_rate',
                   'use_dynamic_graph',
                   'data_group_size']


def runtime_counter(func):
    """Decorator that prints the wall-clock runtime of every call to *func*."""
    def wrapper(*args, **kw):
        start = time.time()
        ret = func(*args, **kw)
        print('runtime of ' + func.__name__ +
              ':', time.time() - start, "seconds")
        return ret

    # BUG FIX: the name was previously copied inside wrapper(), so the
    # decorated function reported __name__ == 'wrapper' until it was first
    # called. Copy identity metadata once, at decoration time.
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    return wrapper


def get_norm_laplacian_sparse(coo_mat: sp.coo_matrix, enhance_self_loop=True) -> sp.coo_matrix:
    """Symmetrically normalize a sparse adjacency matrix: D^-1/2 (A [+ I]) D^-1/2.

    coo_mat: sparse adjacency matrix (n x n).
    enhance_self_loop: when True, add the identity before normalizing
        (the usual GCN renormalization trick).
    """
    n = coo_mat.shape[0]
    adj = coo_mat + sp.eye(n) if enhance_self_loop else coo_mat
    # inverse square root of the degree vector
    inv_sqrt_deg = np.power(np.array(adj.sum(axis=1)).reshape(n), -0.5)
    diag_idx = np.arange(n)
    d_inv_sqrt = sp.coo_matrix(
        (inv_sqrt_deg, (diag_idx, diag_idx)),
        shape=(n, n)
    )
    # normalized Laplacian
    return d_inv_sqrt.dot(adj).dot(d_inv_sqrt)


def get_total_size(obj, seen=None):
    """Recursively compute the total memory footprint of *obj* and the objects
    it references, counting each object at most once.

    seen: set of object ids already counted (internal recursion state).
    Note: sys.getsizeof alone is shallow, hence the manual traversal; an
    object exposing both __dict__ and __iter__ only has __dict__ followed.
    """
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        return 0
    seen.add(obj_id)
    # size computed only after the dedup check (it was previously computed
    # first and discarded for already-seen objects)
    size = sys.getsizeof(obj)
    if isinstance(obj, dict):
        size += sum(get_total_size(v, seen) for v in obj.values())
        size += sum(get_total_size(k, seen) for k in obj.keys())
    elif hasattr(obj, '__dict__'):
        size += get_total_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        size += sum(get_total_size(i, seen) for i in obj)
    return size


def get_model_gram_usage(model: nn.Module):
    """Return the model's memory footprint (parameters + buffers) in KiB."""
    total_bytes = 0
    for tensor in list(model.parameters()) + list(model.buffers()):
        total_bytes += tensor.nelement() * tensor.element_size()
    return total_bytes / 1024


def get_norm_laplacian(mat, enhance_self_loop=True) -> np.ndarray:
    """Dense symmetric normalization: D^-1/2 (A [+ I]) D^-1/2.

    mat: dense adjacency matrix (n x n).
    enhance_self_loop: add the identity before normalizing when True.
    """
    n = mat.shape[0]
    adj = mat + np.eye(n) if enhance_self_loop else mat
    # inverse square root of node degrees
    inv_sqrt_deg = np.power(np.array(adj.sum(axis=1)).reshape(n), -0.5)
    d = np.diag(inv_sqrt_deg)
    return d @ adj @ d


def calc_evaluation_matrix(out: torch.Tensor, node_sort: torch.Tensor,
                           src_list: torch.Tensor, y_tensor: torch.Tensor,
                           lpsi: torch.Tensor, nxg: nx.Graph,
                           print_detail=True):
    """Evaluate a ranked source prediction and compare it with the LPSI baseline.

    out: per-node score from the model (1-D tensor).
    node_sort: node ids sorted by model score, best first.
    src_list: ground-truth source node ids.
    y_tensor: initial node states; -1 marks negative-state nodes.
    lpsi: per-node LPSI baseline score. NOTE: mutated in place (negative-state
        nodes are pushed down by 1e5 so they cannot rank on top).
    nxg: the propagation graph, used for error-distance computation.
    print_detail: print the verbose comparison report when True.

    Returns (best F1 over recall depths, best average error distance,
    recall ratio at the best-F1 depth).
    """
    src_num = len(src_list)
    # rule out the node with initial negative state
    lpsi[y_tensor == -1] -= 1e5
    lpsi_node_sort = torch.flip(lpsi.argsort(), [0])

    # top-|src| candidates of each ranking
    selected_node = node_sort[:src_num]
    lpsi_selected_node = lpsi_node_sort[:src_num]

    # (the model-side hit_num/precision/recall/F1 previously computed here were
    # dead code — never printed nor returned — and have been removed)
    lpsi_hit_num = sum(1 for src in src_list if src in lpsi_selected_node)
    lpsi_precision = lpsi_hit_num / src_num
    lpsi_recall = lpsi_hit_num / src_num

    # BUG FIX: the denominator previously used the model's `precision` instead
    # of `lpsi_precision`, disagreeing with the zero-guard condition.
    lpsi_F1 = 2 * lpsi_precision * lpsi_recall / (lpsi_precision + lpsi_recall) \
        if (lpsi_precision + lpsi_recall) != 0 else -1

    # 0-based rank position of every true source in each ordering, ascending
    recall_pos = sorted([torch.where(node_sort == src)[0][0].item()
                         for src in src_list])
    lpsi_recall_pos = sorted([torch.where(lpsi_node_sort == src)[0][0].item()
                              for src in src_list])

    op_F1_list = []
    op_aver_erd_list = []
    op_F1_recall_ratio = []

    # Sweep recall depths: recalling the top num+1 nodes captures idx+1 sources.
    for idx, num in enumerate(recall_pos):
        p = (idx + 1) / (num + 1)
        r = (idx + 1) / src_num
        this_F1 = 2 * p * r / (p + r) if (p + r) != 0 else -1

        # mean distance from each recalled node to its nearest true source;
        # unreachable pairs contribute the 1e6 sentinel
        all_erd = 0
        for node in node_sort[:num + 1]:
            min_erd = 1e6
            for src in src_list:
                try:
                    erd = nx.shortest_path_length(nxg, node.item(), src.item())
                    min_erd = min(erd, min_erd)
                except nx.NetworkXNoPath:
                    continue
            all_erd += min_erd
        aver_erd = all_erd / (num + 1)
        op_F1_list.append(this_F1)
        op_aver_erd_list.append(aver_erd)
        op_F1_recall_ratio.append((num + 1) / len(out))

    # position-weighted aggregate rank score (lower is better)
    general_score = sum([1 / (idx + 1) * pos for idx,
    pos in enumerate(sorted(recall_pos))]) / out.shape[0]
    lpsi_general_score = sum(
        [1 / (idx + 1) * pos for idx, pos in enumerate(sorted(lpsi_recall_pos))]) / lpsi.shape[0]

    # minimum recall depth that covers every true source
    min_all_recall = max(recall_pos) + 1
    lpsi_min_all_recall = max(lpsi_recall_pos) + 1

    if print_detail:
        print(f"[crgcn] val:{out[selected_node].detach().numpy()},src_val:{out[src_list].detach().numpy()}\n"
              f"[optinal F1] : F1_list={op_F1_list}, best={max(op_F1_list)}\n"
              f"[optinal erd] : erd_list={op_aver_erd_list}, best={min(op_aver_erd_list)}\n"
              f"[lpsi-matrix] : precision={lpsi_precision}, recall={lpsi_recall}, F1={lpsi_F1}\n"
              f"[expand] : src_in_predict_pos={recall_pos},min_need={min_all_recall},generat_score={general_score}\n"
              f"[lpsi-expand] : src_in_predict_pos={lpsi_recall_pos},min_need={lpsi_min_all_recall},generat_score={lpsi_general_score}")
    else:
        print("snapshot done")

    return max(op_F1_list), min(op_aver_erd_list), op_F1_recall_ratio[op_F1_list.index(max(op_F1_list))]


def calc_evaluation_matrix_without_LPSI(out: torch.Tensor, node_sort: torch.Tensor,
                                        src_list: torch.Tensor, nxg: nx.Graph,
                                        print_detail=True):
    """Same recall-depth sweep as calc_evaluation_matrix, without the LPSI baseline.

    out: per-node score from the model (1-D tensor).
    node_sort: node ids sorted by model score, best first.
    src_list: ground-truth source node ids.
    nxg: the propagation graph, used for error-distance computation.
    print_detail: verbose report when True, one-line summary otherwise.

    Returns (best F1 over recall depths, best average error distance,
    recall ratio at the best-F1 depth).
    """
    src_num = len(src_list)

    # top-|src| candidates; only used in the detailed printout below
    # (the hit_num/precision/recall/F1 previously derived from it were dead
    # code — never printed nor returned — and have been removed)
    selected_node = node_sort[:src_num]

    # 0-based rank position of every true source, ascending
    recall_pos = sorted([torch.where(node_sort == src)[0][0].item()
                         for src in src_list])

    op_F1_list = []
    op_aver_erd_list = []
    op_F1_recall_ratio = []
    # Sweep recall depths: recalling the top num+1 nodes captures idx+1 sources.
    for idx, num in enumerate(recall_pos):
        p = (idx + 1) / (num + 1)
        r = (idx + 1) / src_num
        this_F1 = 2 * p * r / (p + r) if (p + r) != 0 else -1

        # mean distance from each recalled node to its nearest true source;
        # unreachable pairs contribute the 1e6 sentinel
        all_erd = 0
        for node in node_sort[:num + 1]:
            min_erd = 1e6
            for src in src_list:
                try:
                    erd = nx.shortest_path_length(nxg, node.item(), src.item())
                    min_erd = min(erd, min_erd)
                except nx.NetworkXNoPath:
                    continue
            all_erd += min_erd
        aver_erd = all_erd / (num + 1)

        op_F1_list.append(this_F1)
        op_aver_erd_list.append(aver_erd)
        op_F1_recall_ratio.append((num + 1) / len(out))

    # position-weighted aggregate rank score (lower is better)
    general_score = sum([1 / (idx + 1) * pos for idx,
    pos in enumerate(sorted(recall_pos))]) / out.shape[0]
    # minimum recall depth that covers every true source
    min_all_recall = max(recall_pos) + 1
    op_F1, op_erd, op_ratio = max(op_F1_list), min(op_aver_erd_list), op_F1_recall_ratio[
        op_F1_list.index(max(op_F1_list))]
    if print_detail:
        print(f"[crgcn] val:{out[selected_node].detach().numpy()},src_val:{out[src_list].detach().numpy()}\n"
              f"[optinal F1] : F1_list={op_F1_list}, best={max(op_F1_list)}\n"
              f"[optinal erd] : erd_list={op_aver_erd_list}, best={min(op_aver_erd_list)}\n"
              f"[expand] : src_in_predict_pos={recall_pos},min_need={min_all_recall},generat_score={general_score}\n")
    else:
        print(
            f"best F1:{op_F1},best erd:{op_erd},average recall ratio:{op_ratio}")

    return op_F1, op_erd, op_ratio


def evaluate(sum_out, node_sort: torch.Tensor, src_list, recall_ratio, nxg):
    """F1 and mean error distance when recalling the top `recall_ratio` fraction.

    Returns (F1, average shortest-path distance from each recalled node to its
    nearest true source; unreachable pairs contribute the 1e6 sentinel).
    """
    src_num = len(src_list)
    recall_size = int(recall_ratio * len(sum_out)) + 1
    top_nodes = node_sort[:recall_size]

    hit_num = sum(1 if node in src_list else 0 for node in top_nodes)
    precision = hit_num / recall_size
    recall = hit_num / src_num
    if (precision + recall) != 0:
        F1 = 2 * precision * recall / (precision + recall)
    else:
        F1 = 0

    erd_sum = 0
    for node in top_nodes:
        nearest = 1e6
        for src in src_list:
            try:
                nearest = min(
                    nearest, nx.shortest_path_length(nxg, node.item(), src.item()))
            except nx.NetworkXNoPath:
                continue
        erd_sum += nearest
    return F1, erd_sum / recall_size


def evaluate_using_src(sum_out, node_sort: torch.Tensor, src_list, nxg, calc_erd=True):
    """Sweep recall depths at each true-source rank; report stats at the best-F1 depth.

    sum_out: per-node aggregated score (only its length is used).
    node_sort: node ids sorted by score, best first.
    src_list: ground-truth source node ids.
    nxg: graph for error-distance computation (only touched when calc_erd).
    calc_erd: skip the expensive shortest-path pass when False (distances
        then report as 0).

    Returns (F1, avg error distance, precision, recall, source rank positions),
    the first four taken at the depth that maximizes F1.
    """
    src_num = len(src_list)
    # 0-based rank of each true source in the predicted ordering, ascending
    recall_pos = sorted(torch.where(node_sort == src)[0][0].item()
                        for src in src_list)

    f1_scores, precisions, recalls, erds, ratios = [], [], [], [], []
    for rank, depth in enumerate(recall_pos):
        prec = (rank + 1) / (depth + 1)
        rec = (rank + 1) / src_num
        f1 = 2 * prec * rec / (prec + rec) if (prec + rec) != 0 else -1

        total_erd = 0
        if calc_erd:
            # distance from every recalled node to its nearest true source;
            # unreachable pairs contribute the 1e6 sentinel
            for node in node_sort[:depth + 1]:
                nearest = 1e6
                for src in src_list:
                    try:
                        nearest = min(nearest, nx.shortest_path_length(
                            nxg, node.item(), src.item()))
                    except nx.NetworkXNoPath:
                        continue
                total_erd += nearest

        precisions.append(prec)
        recalls.append(rec)
        f1_scores.append(f1)
        erds.append(total_erd / (depth + 1))
        ratios.append((depth + 1) / len(sum_out))

    best = f1_scores.index(max(f1_scores))
    return f1_scores[best], erds[best], \
        precisions[best], recalls[best], recall_pos


def decay_sequence(ub, lb, size):
    """Return `size` weights decaying linearly from ub toward lb,
    normalized so the sequence scales with 2 / ((ub + lb) * size)."""
    step = (ub - lb) / size
    norm = ub + lb
    return [(ub - k * step) / norm * 2 / size for k in range(size)]


def aggregate_out(out, y_tensor, ub=1, lb=0.5):
    """Collapse the rows of `out` into one per-node score using linearly
    decaying weights (presumably one row per snapshot — verify with caller).

    Mutates `out` in place for non-SIR generators: nodes whose initial state
    is -1 are pushed down by 1e5 so they cannot win the ranking.
    """
    if args.generate_model != "SIR":
        out[y_tensor == -1] -= 1e5
    weights = torch.tensor([decay_sequence(ub, lb, len(out))])
    weights = weights.transpose(0, 1).to(out.device)
    return (out * weights).sum(dim=0)


def arg_val_list(argz, names):
    """Fetch the values of the given attribute names from an args namespace."""
    # getattr is the idiomatic spelling of obj.__getattribute__(name)
    return [getattr(argz, name) for name in names]


def arg_name_list(argz):
    """List the attribute names stored on an args namespace, in insertion order."""
    return list(vars(argz))


def get_candidate(score: np.ndarray, nxg: nx.Graph) -> np.ndarray:
    """Return the nodes whose score is a strict local maximum, i.e. every
    neighbor scores strictly lower."""
    peaks = [
        node for node in range(len(score))
        if all(score[nb] < score[node] for nb in nxg.neighbors(node))
    ]
    return np.array(peaks)


def eval_by_candidate(gt: np.ndarray, candidate: np.ndarray, nxg: nx.Graph):
    """Score a candidate source set against the ground truth.

    Returns (precision, recall, F1, mean shortest-path distance from each
    candidate to its nearest true source; unreachable candidates contribute
    the 1e6 sentinel).
    """
    hit_num = 0
    erd_sum = 0
    for node in candidate:
        hit_num += 1 if node in gt else 0

        nearest = 1e6
        for src in gt:
            try:
                nearest = min(nearest, nx.shortest_path_length(nxg, node, src))
            except nx.NetworkXNoPath:
                continue
        erd_sum += nearest

    pr = hit_num / len(candidate)
    re = hit_num / len(gt)
    F1 = 2 * pr * re / (pr + re) if (pr + re) != 0 else 0
    return pr, re, F1, erd_sum / len(candidate)


def init_csv(argz, rst_name_list, result=None):
    """Create ./stats/arg_and_result.xlsx with a header row and one data row.

    argz: args namespace whose attributes fill the data row.
    rst_name_list: names of the result columns appended after the args.
    result: result values for those columns; '/' placeholders when None.

    NOTE(review): the header uses `useful_arg_name` while the data row uses
    every attribute of `argz` — the two can diverge; confirm they match.
    """
    header = useful_arg_name + rst_name_list
    values = [getattr(argz, name) for name in argz.__dict__]
    if result is None:
        # placeholder row: args recorded, results not yet available
        data = [header, values + ['/'] * len(rst_name_list)]
    else:
        data = [header, values + result]
    df = pd.DataFrame(data)
    df.to_excel("./stats/arg_and_result.xlsx", header=False, index=False)


def add_train_record(argz, rst_name_list, rst):
    """Append one (args, results) row to the stats workbook, creating it if absent."""
    # xlsx round-trip requires: pip3 install openpyxl
    if not osp.exists(f"./stats/arg_and_result.xlsx"):
        init_csv(argz, rst_name_list, rst)
        return
    df = pd.read_excel(f"./stats/arg_and_result.xlsx", header=None)
    # first row holds the column names; drop the trailing result columns
    # NOTE(review): assumes rst_name_list is non-empty — [:-0] would drop everything
    headers = df.loc[0].to_list()[:-len(rst_name_list)]
    df.loc[len(df.index)] = arg_val_list(argz, headers) + rst
    df.to_excel(f"./stats/arg_and_result.xlsx", header=False, index=False)


def load_if_exist(path):
    """Unpickle and return the object stored at *path*, or None when absent."""
    if not osp.exists(path):
        return None
    with open(path, 'rb') as fin:
        return pickle.load(fin)


def store_to_path(path, obj):
    """Pickle *obj* to *path*, but never overwrite an existing file."""
    if osp.exists(path):
        return
    with open(path, 'wb') as fout:
        pickle.dump(obj, fout)


class Partition:
    """Split `batch` consecutive indices into contiguous parts as evenly as
    possible (the first `batch % part` parts get one extra element).

    Exactly one of `part` (number of parts) or `part_size` (target size per
    part) must be given; the other stays at its -1 sentinel.
    """

    def __init__(self, batch: int, part: int = -1, part_size: int = -1):
        # batch must be positive and exactly one of part/part_size supplied
        assert batch > 0 > part * part_size
        if part == -1:
            part = math.ceil(batch / part_size)

        part = min(part, batch)
        base, extra = divmod(batch, part)
        # first `extra` parts are one element larger
        self.psize = [base + 1] * extra + [base] * (part - extra)
        self.start_idx = []
        offset = 0
        for size in self.psize:
            self.start_idx.append(offset)
            offset += size
        # half-open (start, end) span of each part
        self.range_ie = [(start, start + size)
                         for start, size in zip(self.start_idx, self.psize)]

    def get_start_idx(self) -> list[int]:
        """First index of each part."""
        return self.start_idx

    def get_end_idx(self) -> list[int]:
        """Last (inclusive) index of each part."""
        return [end - 1 for _, end in self.range_ie]

    def get_range_ie(self) -> list[tuple[int, int]]:
        """Half-open (start, end) tuple per part."""
        return self.range_ie

    def get_range_dense(self):
        """Explicit index list per part."""
        return [list(range(start, end)) for start, end in self.range_ie]


class SparseGraphIter:
    """Adjacency-list style view over a CSR graph: indexing row i yields the
    column indices of its non-zero entries (i.e. i's neighbors)."""

    def __init__(self, graph: sp.csr_matrix):
        self._graph = graph
        self._size = graph.shape[0]

    def __getitem__(self, item) -> np.ndarray:
        # nonzero() gives (row_indices, col_indices); columns are the neighbors
        row = self._graph[item]
        return row.nonzero()[1]

    def __len__(self):
        return self._size


def F1score(label: torch.tensor, output: torch.tensor):
    """Compute the F1 score at every threshold of the precision-recall curve.

    output: scores produced by the model.
    label: ground-truth binary labels (0/1).
    Returns a list with one F1 value per (precision, recall) pair.
    """
    precision, recall, thresholds = metrics.precision_recall_curve(label, output)

    f1_per_threshold = []
    for p, r in zip(precision, recall):
        f1_per_threshold.append(2 * p * r / (p + r) if (p + r) != 0 else 0)
    return f1_per_threshold


def criterion(output: torch.tensor, label: torch.tensor):
    """Score a binary prediction.

    output: scores produced by the model (1-D).
    label: ground-truth binary labels (0/1, 1-D, same length).
    Returns (AUC, best F1 over the PR-curve thresholds); (0, 0) when the
    metrics are undefined, e.g. when `label` contains a single class.
    """
    assert output.ndim == 1 and label.ndim == 1 and len(output) == len(label)
    auc, best_f1 = 0, 0
    try:
        auc = roc_auc_score(label, output)
        best_f1 = max(F1score(label, output))
    except ValueError:
        # sklearn raises ValueError for degenerate inputs (single-class labels);
        # the previous bare `except Exception: pass` also hid unrelated bugs.
        pass
    return auc, best_f1


def avg_error_distance(nxg: nx.Graph, pred_srcs: torch.tensor, gt_srcs: torch.tensor) -> float:
    """Average, over the predicted sources, of the shortest-path distance to
    the nearest ground-truth source, with each distance capped at args.max_erd.

    Predictions at distance >= args.max_erd from every true source are dropped
    from the average; if none remain, args.max_erd itself is returned.
    """
    all_erd = []
    pred_srcs = pred_srcs.to(dtype=torch.int64)
    gt_srcs = gt_srcs.to(dtype=torch.int64)
    for pred_src in tqdm(list(pred_srcs), desc="calculating shortest path"):
        path_cutoff = args.max_erd
        for gt_src in gt_srcs:
            # BUG FIX: the original compared/queried the enumerate indices i/j
            # instead of the node ids, so distances were computed between the
            # wrong vertices. An exact hit means distance 0 — stop searching.
            if pred_src == gt_src or path_cutoff == 0:
                path_cutoff = 0
                break
            try:
                v = nx.shortest_path_length(nxg, pred_src.item(), gt_src.item())
                path_cutoff = min(v, path_cutoff)
            except nx.NetworkXNoPath:
                continue
        if path_cutoff != args.max_erd:
            all_erd.append(path_cutoff)
    return sum(all_erd) / len(all_erd) if all_erd else args.max_erd


class CriterionMetrics:
    """Accumulates metric samples per item and reports the per-metric means."""

    def __init__(self):
        # item -> metric -> list of recorded values
        self.metric_dict = dict()

    def add(self, metric: str, item: str, value: float):
        """Record one *value* of *metric* for *item*."""
        per_item = self.metric_dict.setdefault(item, {})
        per_item.setdefault(metric, []).append(value)

    def clear(self):
        """Drop every recorded sample."""
        self.metric_dict.clear()

    def conclude(self):
        """Return {item: {metric: mean of recorded values}}."""
        return {
            item: {
                metric: sum(values) / len(values) if len(values) > 0 else 0
                for metric, values in per_item.items()
            }
            for item, per_item in self.metric_dict.items()
        }


if __name__ == '__main__':
    # Ad-hoc manual checks kept for convenience; not part of the library API.
    # init_csv(args)

    # cm = CriterionMetrics()
    # cm.add('acc', 'lpsi', 0.5)
    # cm.add('f1', 'lpsi', 0.6)
    # cm.add('acc', 'gen', 0.7)
    # cm.add('f1', 'gen', 0.8)
    # print(cm.conclude())

    # Smoke test: 5-node path graph; adjacent nodes should be at distance 1.
    graph = nx.path_graph(5)
    print(graph.nodes)
    print(nx.shortest_path_length(graph, source=1, target=2))
