#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Contact :   raogx.vip@hotmail.com
@License :   (C)Copyright 2017-2018, Liugroup-NLPR-CASIA

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2022/10/7 17:23   gxrao      1.0         None
'''

# import lib
import unittest
# 1.导入必要的库
import numpy as np
import random
import math
import operator
from collections import defaultdict
import networkx as nx
from warnings import warn
import os
from itertools import islice
import operator
import csv
import pandas as pd
import functools
from pathlib import Path
import json

def get_dict_key(dic, value):
    """Return the first key in *dic* whose value equals *value*.

    Fix over the original: scan ``dic.items()`` directly instead of
    materializing two full lists and calling ``list.index`` (one pass,
    no extra memory).

    :param dic: mapping to search
    :param value: value to look up
    :return: the matching key (first in insertion order)
    :raises ValueError: if *value* is not among the dict's values
    """
    for key, val in dic.items():
        if val == value:
            return key
    raise ValueError(f"{value!r} is not in the dict's values")

def read_nodes_file_csv(nodes_file):
    """Read the node CSV file and map each node name to a sequential id.

    The first row is treated as a header and skipped; the node name is
    taken from the 6th column (index 5).

    Fix over the original: parse with the ``csv`` module instead of a
    naive ``str.split(',')`` so quoted fields containing commas are
    handled correctly and trailing newlines never leak into the keys.

    :param nodes_file: path of the node CSV file
    :return: dict mapping node name -> 0-based data-row index
    """
    nodedict = {}   # key: node (variable) name, value: node id
    with open(nodes_file, 'r', newline='') as file:
        reader = csv.reader(file)
        for index, row in enumerate(islice(reader, 1, None)):
            nodedict[row[5]] = index
    return nodedict

def read_edges_file_csv(edges_file, nodedict):
    '''
    Read the edge CSV file and translate endpoints to node ids.

    The first row is treated as a header and skipped.  Column 0 is the
    source node name, column 2 the destination; columns 4 and 6 carry
    the two numeric edge attributes (semantic similarity and token
    co-occurrence frequency).

    Fix over the original: parse with the ``csv`` module instead of a
    naive ``str.split(',')`` so quoted fields containing commas are
    handled correctly.

    :param edges_file: path of the edge CSV file
    :param nodedict: {node name: node id} mapping (see read_nodes_file_csv)
    :return: (srcs, dsts, attrs) tuples — source ids, destination ids,
             and [similarity, co-occurrence] attribute pairs per edge
    :raises KeyError: if an endpoint name is missing from *nodedict*
    '''
    srcs = []
    dsts = []
    attrs = []
    with open(edges_file, 'r', newline='') as file:
        reader = csv.reader(file)
        for row in islice(reader, 1, None):
            srcs.append(nodedict[row[0]])   # node types can be looked up in the node set
            dsts.append(nodedict[row[2]])
            # Only the two numeric attributes are kept for now:
            # semantic similarity (col 4) and co-occurrence frequency (col 6).
            attrs.append([float(row[4]), float(row[6])])
    return tuple(srcs), tuple(dsts), tuple(attrs)

def get_nstart(nodes_df, N):
    '''
    Starting vector for the PageRank iteration.

    Main nodes ('主节点') share the initial mass uniformly (1/N each);
    every other (attribute) node starts at 0.

    :param nodes_df: node attribute DataFrame; must contain a '节点类型' column
    :param N: number of main nodes
    :return: numpy array with one starting value per node
    '''
    share = 1.0 / N
    values = [share if node_type == '主节点' else 0
              for node_type in nodes_df.loc[:, '节点类型']]
    return np.array(values)

def get_personalization(nodes_df, N):
    '''
    Personalization vector for the PageRank iteration.

    Uses the node attribute I consider meaningful — the C-value (column
    'c值', computed elsewhere from length/frequency statistics) — and
    normalizes it so the entries sum to 1.

    :param nodes_df: node attribute DataFrame; must contain a 'c值' column
    :param N: number of main nodes (unused here, kept for interface symmetry)
    :return: numpy array with one normalized personalization value per node
    '''
    c_values = nodes_df.loc[:, 'c值']
    total = sum(c_values)
    return np.array([c / total for c in c_values])

def mergeAttrs(attrs):
    '''Fuse each edge's list of attribute values into its arithmetic mean.

    :param attrs: iterable of per-edge value lists
    :return: list with one averaged value per edge
    '''
    merged = []
    for values in attrs:
        merged.append(sum(values) / len(values))
    return merged

def labelCmp(a, b):
    '''Comparator producing a descending order over dict items.

    Each argument is a (key, value) item where the value is
    [PPR score, variant list], e.g. ('b', [10, ['x', 'y']]).
    When the keys differ, sort by score descending, breaking score ties
    by the number of variants (descending).  Equal keys fall back to key
    subtraction (always 0 in practice, since dict keys are unique).
    '''
    key_a, (score_a, variants_a) = a
    key_b, (score_b, variants_b) = b
    if key_a == key_b:
        return key_b - key_a
    if score_a == score_b:
        # score tie: the item with more variants sorts first
        return len(variants_b) - len(variants_a)
    return score_b - score_a

def sumScores_attrsNode2termNode(nodes_df, PPR_dict):
    '''
    Sum the per-node PPR scores up to their owning term and sort descending.

    :param nodes_df: node attribute DataFrame; column 1 holds the term id,
                     column 5 the term-variant string
    :param PPR_dict: {node index: PPR score, ...} keyed by node index
                     (not term index)
    :return: dict {term id: [summed PPR score, [term variant, ...]]},
             ordered descending by labelCmp
    '''
    termNode_attrs = {}     # {term id: [summed PPR score, [term variants]]}
    for index, PPR in PPR_dict.items():
        node_attrs = list(nodes_df.iloc[index, :])
        term_id = node_attrs[1]
        term_variant = node_attrs[5]
        # Idiom fix: membership test on the dict itself, not `.keys()`.
        if term_id in termNode_attrs:       # term already created: accumulate
            termNode_attrs[term_id][0] += PPR
            termNode_attrs[term_id][1].append(term_variant)
        else:
            termNode_attrs[term_id] = [PPR, [term_variant]]

    # labelCmp sorts by score descending, ties broken by variant count.
    sorted_scores = dict(sorted(termNode_attrs.items(), key=functools.cmp_to_key(labelCmp)))
    return sorted_scores

def pagerank_scipy_diy(
    nodes_file, edges_file,
    alpha=0.85,
    max_iter=100,
    tol=1.0e-6,
    dangling=None,
    encoding='ANSI'
):
    """Personalized PageRank over a term graph loaded from CSV files.

    Adapted from networkx's (deprecated) ``pagerank_scipy``, customized so
    that edge attributes (semantic similarity, token co-occurrence
    frequency) act as transition weights and node attributes (C-value)
    act as the personalization vector.

    :param nodes_file: path of the node CSV (read twice: via pandas for the
                       attribute table and via read_nodes_file_csv for ids)
    :param edges_file: path of the edge CSV
    :param alpha: damping factor of the PageRank iteration
    :param max_iter: maximum number of power iterations
    :param tol: convergence tolerance (L1 norm, scaled by the node count)
    :param dangling: optional {node: weight} dict describing how the outflow
                     of dangling (zero out-degree) nodes is spread; defaults
                     to the personalization vector
    :param encoding: text encoding pandas uses to read nodes_file
    :return: dict {term id: [summed PPR score, [term variants]]} sorted
             descending (see sumScores_attrsNode2termNode); {} when the
             graph has no nodes
    :raises nx.PowerIterationFailedConvergence: if the iteration does not
             converge within max_iter steps
    """
    msg = "networkx.pagerank_scipy is deprecated and will be removed in NetworkX 3.0, use networkx.pagerank instead."
    warn(msg, DeprecationWarning, stacklevel=2)
    import numpy as np
    import scipy as sp
    import scipy.sparse  # call as sp.sparse

    # 1. Prepare the data
    nodes_df = pd.read_csv(nodes_file, encoding=encoding)
    nodedict = read_nodes_file_csv(nodes_file)
    src, dst, attrs = read_edges_file_csv(edges_file, nodedict)
    nodelist = nodedict.keys()
    N = len(nodedict)  # the size of graph G equals the total node count
    if N == 0:
        return {}

    # 2. Initialization

    # 2.1 Build the transition matrix M from the edge attributes
    #     (semantic similarity, token co-occurrence frequency).
    attr = mergeAttrs(attrs)    # fuse the multiple edge attributes into one weight
    M = sp.sparse.coo_matrix((attr, (src, dst)), shape=(N, N), dtype=float).asformat('csr')     # graph adjacency matrix as a SciPy sparse matrix

    S = np.array(M.sum(axis=1)).flatten()
    '''求出每行元素的和
    对csr_matrix类型来说，是求节点src的出度之和，然后打平，表示为各个节点的 出度之和
    '''
    # (The note above: row sums of the CSR matrix give each source node's
    #  total out-weight, flattened to one value per node.)

    S[S != 0] = 1.0 / S[S != 0]  # skip zero rows, take reciprocals (S becomes 1 / out-weight of each source)
    Q = sp.sparse.spdiags(S.T, 0, *M.shape, format="csr")  # Q: diagonal matrix built from S
    M = Q * M  # row-normalize M by left-multiplying with Q

    # 2.2 Starting vector of the PageRank iteration
    x = get_nstart(nodes_df, N)

    # 2.3 Personalization vector from the node attribute: improved C-value (C_idf)
    p = get_personalization(nodes_df, N)

    # 2.4 Weights for dangling nodes
    if dangling is None:    # by default spread a dangling node's outflow according to p
        dangling_weights = p
    else:
        # If dangling nodes were specified, normalize their weights:
        # convert the dangling dict into an array in nodelist order.
        dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float)
        dangling_weights /= dangling_weights.sum()

    # 2.5 Mark the dangling nodes (rows whose out-weight is zero)
    is_dangling = np.where(S == 0)[0]

    # 3. Power iteration: make up to max_iter iterations
    for _ in range(max_iter):
        xlast = x
        # Core PageRank update, modified to include edge and node attributes.
        x = alpha * (x * M + sum(x[is_dangling]) * dangling_weights) + (1 - alpha) * p
        # check convergence, l1 norm
        err = np.absolute(x - xlast).sum()
        # 4. Stop once converged, otherwise keep iterating.
        if err < N * tol:   # converged: aggregate node scores per term and return
            return sumScores_attrsNode2termNode(nodes_df, dict(zip(nodedict.values(), map(float, x)))) # zip node ids with their scores and build a dict
            # return nodedict, nodes_df, dict(zip(nodedict.values(), map(float, x)))
    raise nx.PowerIterationFailedConvergence(max_iter)

class test(unittest.TestCase):
    """Driver tests: settings loading and (disabled) PageRank run."""

    def load_settings(self, settings_file="config/settings.json"):
        '''
        Load settings.

        Keys missing from *settings_file* keep their default value; keys
        present in the file override the default.  The derived full paths
        (input_nodes, input_edges, output_PageRank) are rebuilt from the
        effective directory / file-name entries.

        :param settings_file: path of the JSON settings file
        :return: dict holding the effective settings
        '''
        # --- default settings ---
        default = {
           "input_pre": "D:\\FILE\\Repository\\keyword_extraction-master关键词抽取\\output",
           "input_nodes_name": "output_nodes.csv",
           "input_edges_name": "output_edges.csv",
           "input_nodes": "",
           "input_edges": "",

           "output_pre": "outputs",
           "output_PageRank_name": "output_PageRank.csv",
           "output_PageRank": "",

           "encoding": "ANSI"
        }
        default["input_nodes"] = os.path.join(default["input_pre"], default["input_nodes_name"])
        default["input_edges"] = os.path.join(default["input_pre"], default["input_edges_name"])
        default["output_PageRank"] = os.path.join(default["output_pre"], default["output_PageRank_name"])
        # --- load settings (absent keys keep defaults, present ones override) ---
        try:
            with open(Path(settings_file), "r") as file:
                # Fix: dropped the redundant file.close() — the with-block
                # already closes the file.
                settings = json.load(file)

                if "input_pre" in settings:
                    default["input_pre"] = settings["input_pre"]
                    if not os.path.isdir(default["input_pre"]):
                        os.makedirs(default["input_pre"])
                else:
                    print("WARNING: Invalid POS input_pre: " + default["input_pre"])
                    print("         Using the default instead.\n")

                if "output_pre" in settings:
                    default["output_pre"] = settings["output_pre"]
                    if not os.path.isdir(default["output_pre"]):
                        os.makedirs(default["output_pre"])
                else:
                    print("WARNING: Invalid POS output_pre: " + default["output_pre"])
                    print("         Using the default instead.\n")

                if "input_nodes_name" in settings:
                    default["input_nodes_name"] = settings["input_nodes_name"]
                    default["input_nodes"] = os.path.join(default["input_pre"], default["input_nodes_name"])
                else:
                    print("WARNING: Invalid POS input_nodes_name: " + default["input_nodes_name"])
                    print("         Using the default instead.\n")

                if "input_edges_name" in settings:
                    default["input_edges_name"] = settings["input_edges_name"]
                    default["input_edges"] = os.path.join(default["input_pre"], default["input_edges_name"])
                else:
                    print("WARNING: Invalid POS input_edges_name: " + default["input_edges_name"])
                    print("         Using the default instead.\n")

                if "output_PageRank_name" in settings:
                    default["output_PageRank_name"] = settings["output_PageRank_name"]
                    default["output_PageRank"] = os.path.join(default["output_pre"], default["output_PageRank_name"])
                    if os.path.exists(default["output_PageRank"]):  # remove a stale output file first
                        os.remove(default["output_PageRank"])
                else:
                    print("WARNING: Invalid POS output_PageRank_name: " + default["output_PageRank_name"])
                    print("         Using the default instead.\n")
        except (OSError, json.JSONDecodeError):
            # Settings file missing, unreadable or not valid JSON: keep all
            # default values.  (Fix: the original bare `except:` silently
            # swallowed every exception, including programming errors.)
            print(f"WARNING: 设置文件 {os.path.abspath(settings_file)} 没找到。 使用默认值代替。\n")

        print("--- 设置Settings ---")
        print("* input_nodes     :", default["input_nodes"])
        print("* input_edges     :", default["input_edges"])
        print("* output_PageRank :", default["output_PageRank"])
        print("-------------------")

        return default

    def test_load_settings(self):
        # Load with the default settings path, then with an alternate one.
        settings = self.load_settings()

        print("--- 再次输出Settings ---")
        print("* input_nodes     :", settings["input_nodes"])
        print("* input_edges     :", settings["input_edges"])
        print("* output_PageRank :", settings["output_PageRank"])
        print("-----------------------")

        settings_file = "config/settings_Europarl.json"
        settings = self.load_settings(settings_file=settings_file)

        print("--- 再次输出Settings ---")
        print("* input_nodes     :", settings["input_nodes"])
        print("* input_edges     :", settings["input_edges"])
        print("* output_PageRank :", settings["output_PageRank"])
        print("-----------------------")

    def untest_pagerank_scipy(self):
        # Disabled by its "untest" prefix; kept as a manual driver.
        # 1. node-set / edge-set paths
        pre = r'D:\FILE\Repository\keyword_extraction-master关键词抽取\output'
        nodes_file = os.path.join(pre, 'output_nodes.csv')
        edges_file = os.path.join(pre, 'output_edges.csv')

        # 2. PageRank; result is sorted descending and term-level only
        sorted_scores = pagerank_scipy_diy(nodes_file, edges_file)

        # 3. Print the PPR scores and persist them to CSV
        PageRank_output = r'.\outputs\PageRank_output.csv'
        encoding = 'ANSI'
        if os.path.exists(PageRank_output):
            os.remove(PageRank_output)

        print("术语id\tPPR分数\t术语变体")
        # Fix: open the output file once and stream all rows through a
        # single writer, instead of reopening the file for every row.
        with open(PageRank_output, 'a', newline='', encoding=encoding) as csvfile:  # newline='' avoids blank lines between rows
            writer = csv.writer(csvfile)
            writer.writerow(["术语索引", "PPR分数", "术语变体"])
            for key, value in sorted_scores.items():
                print(f"{key}\t{value[0]}\t{value[1]}")
                writer.writerow([key] + value)

        print('test over')


# NOTE: on Windows the script entry point must be guarded by
# `if __name__ == '__main__':` for it to run correctly.
if __name__ == '__main__':
    unittest.main()
    # NOTE(review): unittest.main() calls sys.exit() by default, so this
    # print is unreachable; pass exit=False above if 'over' should run.
    print('over')
