# coding:utf-8
# author:zhangbiao
# e_mail:zhangbiao2014@163.com
import pickle
from tqdm import tqdm
import pandas as pd
import networkx as nx


# 1. Load every piece of input data
def all_need_data(jichu_path_df, jichu_path_dict, yingyong_path_df, yingyong_path_dict, metrics_path):
    """
    Load all inputs for the topic-association computation.

    :param jichu_path_df: xlsx path — basic-research topic table
    :param jichu_path_dict: pkl path — basic-research topic -> word frequencies
    :param yingyong_path_df: xlsx path — applied-research topic table
    :param yingyong_path_dict: pkl path — applied-research topic -> word frequencies
    :param metrics_path: xlsx path — the matrix sheet to be filled in
    :return: (jichu_df, jichu_topic_fre, yingyong_df, yingyong_topic_fre, metrics)
    """
    def _load_pickle(path):
        # helper: read one pickled topic-frequency structure
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    jichu_df = pd.read_excel(jichu_path_df)           # basic-research data
    jichu_topic_fre = _load_pickle(jichu_path_dict)
    yingyong_df = pd.read_excel(yingyong_path_df)     # applied-research data
    yingyong_topic_fre = _load_pickle(yingyong_path_dict)
    metrics = pd.read_excel(metrics_path)             # matrix to be filled
    return jichu_df, jichu_topic_fre, yingyong_df, yingyong_topic_fre, metrics

# 2. Find the keyword intersection of any applied/basic topic pair
def intersection_word(yingyong_df, l, jichu_df, h):
    """
    Return the shared keywords of applied topic `l` and basic topic `h`.

    :param yingyong_df: applied-research topic table; keywords ('@'-joined) in column 3
    :param l: applied topic index; row l-2 is read (l counts from the matrix column, so l=2 is topic 0)
    :param jichu_df: basic-research topic table; keywords ('@'-joined) in column 3
    :param h: basic topic index; row h-1 is read (h counts from the matrix row, so h=1 is topic 0)
    :return: (intersection set, applied keyword list, basic keyword list)
    """
    applied_words = yingyong_df.iloc[l - 2, [2]].values[0].split('@')
    basic_words = jichu_df.iloc[h - 1, [2]].values[0].split('@')
    shared = set(applied_words).intersection(basic_words)
    return shared, applied_words, basic_words

# 3. Compute the explicit association degree
def computer_xianxing_degree(h, l, jichu_topic_fre, yingyong_topic_fre, jiaoji_set):
    """
    Build a frame of per-word probabilities and their explicit association degree.

    :param h: basic topic index; entry h-1 of jichu_topic_fre is used
    :param l: applied topic index; entry l-2 of yingyong_topic_fre is used
    :param jichu_topic_fre: sequence of basic-topic word->probability mappings
    :param yingyong_topic_fre: sequence of applied-topic word->probability mappings
    :param jiaoji_set: the intersection words of the two topics
    :return: (DataFrame with per-word degrees, applied word dict, basic word dict)
    """
    basic_dict = dict(jichu_topic_fre[h - 1])        # basic topic (0-based after shift)
    applied_dict = dict(yingyong_topic_fre[l - 2])   # applied topic (0-based after shift)
    words = list(jiaoji_set)
    topic_xianxing = pd.DataFrame({
        '交集词': words,
        '基础研究概率': [basic_dict[w] for w in words],
        '应用研究概率': [applied_dict[w] for w in words],
    })
    # explicit degree = mean of the two per-topic probabilities
    topic_xianxing['交集词显性关联度'] = (topic_xianxing['基础研究概率'] + topic_xianxing['应用研究概率']) / 2
    return topic_xianxing, applied_dict, basic_dict

# 4. Build the co-occurrence network
def build_matrix(co_authors_list, is_reverse):
    '''
    Build co-occurrence counts (stored in dicts) from the co-author rows and
    return them sorted by weight.
    :param co_authors_list: list of rows, each a ' | '-separated item string
    :param is_reverse: whether to sort by weight in descending order
    :return node_str: node triples as CSV-style "name,count" lines
    :return edge_str: edge triples as CSV-style "a,b,count" lines
    '''
    node_dict = {}  # node name -> frequency
    edge_dict = {}  # "a,b" key -> co-occurrence frequency
    # Loop 1: every row of the table
    for row_authors in co_authors_list:
        row_authors_list = row_authors.split(' | ')  # split on the ' | ' delimiter
        # Loop 2: every item of the current row
        for index, pre_au in enumerate(row_authors_list):
            node_dict[pre_au] = node_dict.get(pre_au, 0) + 1
            # BUG FIX: the original broke out when pre_au *compared equal* to the
            # last element, so a duplicate of the last item appearing earlier in
            # the row silently dropped the rest of the row's counts and edges.
            # The slice below is empty at the final index, so no break is needed.
            # Loop 3: every item after the current one -> pair frequencies
            for next_au in row_authors_list[index + 1:]:
                A, B = pre_au, next_au
                if A > B:  # canonical order so (A,B) and (B,A) share one key
                    A, B = B, A
                key = A + ',' + B
                edge_dict[key] = edge_dict.get(key, 0) + 1
    node_str = sortDictValue(node_dict, is_reverse)  # nodes
    edge_str = sortDictValue(edge_dict, is_reverse)  # edges
    return node_str, edge_str


def sortDictValue(d, is_reverse):
    '''
    Sort a dict by value and render it as CSV-style "key,value" lines.
    :param d: the dict to sort (renamed from `dict`, which shadowed the builtin;
              every in-file caller passes positionally, so this is compatible)
    :param is_reverse: whether to sort in descending order
    :return: one "key,value\n" line per entry, concatenated
    '''
    tups = sorted(d.items(), key=lambda item: item[1], reverse=is_reverse)
    # ''.join is linear; the original repeated `s +=` was quadratic
    return ''.join(f'{k},{v}\n' for k, v in tups)

# 5. Implicit association degree
# 5.1 Build the co-occurrence graph
def make_graph(jiaoji_set, yingyong_ls, jichu_ls, y='(yingyong)', j='(jichu)'):
    """
    Build the combined association network for one applied/basic topic pair.

    Intersection words are suffixed (`y` on the applied side, `j` on the basic
    side) so the same word becomes two distinct nodes; the two suffixed copies
    are then connected by an extra co-occurrence row.

    :param jiaoji_set: words shared by the two topics
    :param yingyong_ls: applied-research keyword list
    :param jichu_ls: basic-research keyword list
    :param y: suffix for applied-side copies of shared words
    :param j: suffix for basic-side copies of shared words
    :return: networkx.Graph with integer edge weights
    """
    # suffix the shared words so the two research sides stay distinguishable
    replace_dict_yingyong = {word: word + y for word in jiaoji_set}
    replace_dict_jichu = {word: word + j for word in jiaoji_set}
    new_yingyong_ls = [replace_dict_yingyong.get(i, i) for i in yingyong_ls]
    new_jichu_ls = [replace_dict_jichu.get(i, i) for i in jichu_ls]
    # co-occurrence rows: one per research side, plus one linking row per shared word
    co_word_list = [" | ".join(new_yingyong_ls), " | ".join(new_jichu_ls)]
    for word in jiaoji_set:
        co_word_list.append(word + y + " | " + word + j)
    node_str, edge_str = build_matrix(co_word_list, is_reverse=True)
    # parse "a,b,weight" records into a weighted graph
    g = nx.Graph()
    # FIX: iterate splitlines(), not split() — split() cuts on *any* whitespace,
    # which shattered records whenever a keyword contained a space
    for line in edge_str.splitlines():
        a = line.split(',')
        # NOTE(review): assumes keywords themselves contain no commas — TODO confirm
        # BUG FIX: the weight was previously stored as a string; cast to int so
        # the 'weight' edge attribute is numerically usable
        g.add_weighted_edges_from([(a[0], a[1], int(a[2]))])
    return g
# 5.2 Find neighbouring nodes
def search_dot(topic_xianxing, g, zhongjian_houzhui, zhongdian_houzhui):
    """
    For each intersection word, enumerate (start, middle, end) node triples.

    middle = the word with `zhongjian_houzhui`; start = each neighbour of the
    middle node (excluding the word's own opposite-suffix copy); end = the word
    with `zhongdian_houzhui`.

    :param topic_xianxing: DataFrame holding the intersection words in '交集词'
    :param g: the co-occurrence graph (indexable: g[node] -> neighbour mapping)
    :param zhongjian_houzhui: suffix of the middle node's side
    :param zhongdian_houzhui: suffix of the end node's side
    :return: DataFrame with columns '起点' (start), '中间' (middle), '终点' (end)
    """
    starts, middles, ends = [], [], []
    for word in topic_xianxing['交集词'].values:
        middle_node = word + zhongjian_houzhui
        end_node = word + zhongdian_houzhui
        # neighbours of the middle node, excluding the end node itself
        neighbours = [n for n in g[middle_node] if n.replace(zhongdian_houzhui, '') != word]
        starts.extend(neighbours)
        middles.extend([middle_node] * len(neighbours))
        ends.extend([end_node] * len(neighbours))
    result = pd.DataFrame()
    result['起点'] = starts
    result['中间'] = middles
    result['终点'] = ends
    return result

# 5.3 Compute C and Z for word pairs with an implicit association
def compute_c_z(yingyong_df_, yingyong_topic_dict, qidian_houzhui, zhongjian_houzhui, topic_xianxing=None):
    """
    C = mean topic probability of start and middle; Z = explicit association
    degree of the middle word.

    :param yingyong_df_: frame with '起点'/'中间'/'终点' (start/middle/end) columns
    :param yingyong_topic_dict: word -> probability for the side being computed
        (pass jichu_topic_dict when computing the basic-research side)
    :param qidian_houzhui: suffix to strip from start nodes ('(yingyong)' or '(jichu)')
    :param zhongjian_houzhui: suffix to strip from middle nodes ('(yingyong)' or '(jichu)')
    :param topic_xianxing: DataFrame with '交集词' and '交集词显性关联度' columns.
        BUG FIX: the original silently read the module-level global
        `topic_xianxing`, which only works when called from this script's
        __main__; it is now an explicit optional parameter, defaulting to that
        global so existing callers keep working.
    :return: copy of yingyong_df_ with '起点-中间C' and '中间-终点Z' columns added
    """
    if topic_xianxing is None:
        # backward-compatible fallback to the global the original code relied on
        topic_xianxing = globals()['topic_xianxing']
    result = yingyong_df_.copy()
    # C: mean of the start-word and middle-word probabilities in this topic
    qidian_fre = [yingyong_topic_dict[w.replace(qidian_houzhui, '')]
                  for w in yingyong_df_['起点'].values]
    zhongjian_fre = [yingyong_topic_dict[w.replace(zhongjian_houzhui, '')]
                     for w in yingyong_df_['中间'].values]
    result['起点-中间C'] = [(a + b) / 2 for a, b in zip(qidian_fre, zhongjian_fre)]
    # Z: explicit association degree of the middle word, via a lookup dict
    z_dict = dict(zip(topic_xianxing['交集词'].values,
                      topic_xianxing['交集词显性关联度'].values))
    result['中间-终点Z'] = [z_dict[w.replace(zhongjian_houzhui, '')]
                        for w in yingyong_df_['中间'].values]
    return result
# 5.4 Compute the overall link degree
def compute_yinxing_degree(topic_xianxing, g, yingyong_topic_dict, jichu_topic_dict):
    """
    Combine the implicit association (harmonic combination of C and Z) with the
    explicit association into a single topic link degree.

    :param topic_xianxing: frame of intersection words and explicit degrees
    :param g: the co-occurrence graph from make_graph
    :param yingyong_topic_dict: applied-topic word -> probability
    :param jichu_topic_dict: basic-topic word -> probability
    :return: implicit degree sum + explicit degree sum
    """
    # C/Z triples starting from applied-research nodes
    applied_triples = search_dot(topic_xianxing, g,
                                 zhongjian_houzhui='(yingyong)', zhongdian_houzhui='(jichu)')
    applied_cz = compute_c_z(applied_triples, yingyong_topic_dict=yingyong_topic_dict,
                             qidian_houzhui='(yingyong)', zhongjian_houzhui='(yingyong)')
    # C/Z triples starting from basic-research nodes
    basic_triples = search_dot(topic_xianxing, g,
                               zhongjian_houzhui='(jichu)', zhongdian_houzhui='(yingyong)')
    basic_cz = compute_c_z(basic_triples, yingyong_topic_dict=jichu_topic_dict,
                           qidian_houzhui='(jichu)', zhongjian_houzhui='(jichu)')
    # merge both sides; 1/(1/Z + 1/C) is the implicit degree per triple
    all_z_c = pd.concat([applied_cz, basic_cz])
    all_z_c['隐性关联度'] = 1 / (1 / all_z_c['中间-终点Z'] + 1 / all_z_c['起点-中间C'])
    implicit_total = all_z_c['隐性关联度'].sum()
    explicit_total = topic_xianxing['交集词显性关联度'].sum()
    return implicit_total + explicit_total



if __name__ == '__main__':
    # Step 1: load all inputs (paths are machine-specific, Windows-only)
    jichu_path_df = r'D:\jupyter\DK\DK数据\2数据处理\3聚类\4主题关联\基础研究-主题年份及主题词.xlsx'
    jichu_path_dict = r'D:\jupyter\DK\DK数据\2数据处理\3聚类\1基础研究\3主题—主题词.pkl'
    yingyong_path_df = r'D:\jupyter\DK\DK数据\2数据处理\3聚类\4主题关联\应用研究-主题年份及主题词.xlsx'
    yingyong_path_dict = r'D:\jupyter\DK\DK数据\2数据处理\3聚类\2应用研究\4主题—主题词.pkl'
    metrics_path = r'D:\jupyter\DK\DK数据\2数据处理\3聚类\4主题关联\基础研究-应用研究关联.xlsx'
    jichu_df, jichu_topic_fre, yingyong_df, yingyong_topic_fre, metrics = all_need_data(jichu_path_df,jichu_path_dict,yingyong_path_df,yingyong_path_dict,metrics_path)
    # Step 2: fill the matrix with topic_link_degree, one column per applied topic
    # l starts at 2: the first two matrix columns are labels, so l=2 is applied topic 0
    for l in tqdm(range(2, metrics.shape[1])):
        ls = []
        # h iterates matrix rows: row 0 holds the year, so h=1 is basic topic 0
        for h in range(0, metrics.shape[0]):
            if h == 0:
                # keep the year cell as-is
                ls.append(metrics.iloc[h, l])
            else:
                # intersection words of applied topic l and basic topic h
                jiaoji_set, yingyong_ls, jichu_ls = intersection_word(yingyong_df, l, jichu_df, h)
                # print(jiaoji_set)
                # explicit association degree per shared word
                # NOTE(review): compute_c_z reads this `topic_xianxing` as a
                # module-level global — keep the variable name unchanged
                topic_xianxing,yingyong_topic_dict,jichu_topic_dict =computer_xianxing_degree(h,l,jichu_topic_fre,yingyong_topic_fre,jiaoji_set)
                # implicit association degree:
                # 1) build the co-occurrence graph for this topic pair
                g = make_graph(jiaoji_set, yingyong_ls, jichu_ls, y = '(yingyong)', j = '(jichu)')
                # 2) combine implicit + explicit into one link degree
                topic_link_degree = compute_yinxing_degree(topic_xianxing,g,yingyong_topic_dict,jichu_topic_dict)
                ls.append(topic_link_degree)


        # overwrite the matrix column with [year, degree, degree, ...]
        metrics[metrics.columns[l]] = ls
    metrics.to_excel(r'D:\jupyter\DK\DK数据\2数据处理\3聚类\4主题关联\应用-基础1.xlsx',index=False)
