import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
from DWGI.models import DWGI, LogReg
from DWGI.utils import process
import pickle


def sim2(vector1, vector2):
    """Return the cosine similarity between two vectors.

    Replaces the deprecated ``np.mat``/``np.matrix`` implementation with
    plain ndarrays and ``np.dot``; behavior is identical for non-zero
    inputs. Inputs are flattened, so any 1-D sequence or row/column
    vector of equal length works.

    Args:
        vector1: First vector (any 1-D array-like of numbers).
        vector2: Second vector, same number of elements as vector1.

    Returns:
        float: Cosine similarity in [-1, 1]; 0.0 when either vector has
        zero norm (guards the division instead of emitting nan/inf).
    """
    a = np.asarray(vector1, dtype=float).ravel()
    b = np.asarray(vector2, dtype=float).ravel()
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0.0:
        # Zero vector has no direction; return neutral similarity
        # rather than propagating nan/inf from a 0/0 division.
        return 0.0
    return float(np.dot(a, b) / denom)


def predict_subject():
    """Predict subject (topic) words for every document.

    Pipeline: load a trained DWGI model and the preprocessed graph data,
    embed all nodes, then for each document collect subject words either
    directly (documents outside the embedding index) or by cosine-similarity
    ranking of candidate words from the document's subgraphs. The result
    dict {doc_name: [word_name, ...]} is pickled to doc_subject_res.txt.

    NOTE(review): all file paths are hard-coded absolute Windows paths;
    consider making them configurable.
    """
    hid_units = 200
    sparse = False
    nonlinearity = 'prelu'
    ft_size = 200
    model = DWGI(ft_size, hid_units, nonlinearity)
    # NOTE(review): torch.load without map_location — presumably assumes the
    # checkpoint device matches the current machine; confirm on CPU-only hosts.
    model.load_state_dict(torch.load(r'F:\mypython\final_subject\DWGI\best_dgi.pkl'))

    adj, features, index, node_subgraph, subgraph_node, doc_word_edge = process.load_data()
    # Get the index ranges of word nodes and document nodes, and the subgraph-id range
    word_index_min, word_index_max, doc_index_min, doc_index_max, graph_min, graph_max = process.get_doc_word_index_range(
        node_subgraph)
    doc_word_edge = process.get_doc_word_edge(index)
    features = process.preprocess_features(features)
    nb_nodes = features.shape[0]
    # ft_size is recomputed from the actual feature matrix (overrides the 200 above)
    ft_size = features.shape[1]
    # nb_classes = labels.shape[1]

    adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))

    if sparse:
        sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
    else:
        # Dense path (sparse is hard-coded False above); adds self-loops again
        # on top of normalize_adj's input — TODO confirm this double eye() is intended.
        adj = (adj + sp.eye(adj.shape[0])).todense()

    # Add a leading batch dimension of size 1 for the model
    features = torch.FloatTensor(features[np.newaxis])
    if not sparse:
        adj = torch.FloatTensor(adj[np.newaxis])

    # Embed all nodes; embeds presumably requires no grad — verify .numpy() is safe here
    embeds, _ = model.embed(features, sp_adj if sparse else adj, sparse, None)
    embeds = embeds.squeeze(dim=0).numpy()
    print(embeds)
    print(embeds.shape)

    # Names of word nodes that remain in the connected graph (non-isolated words)
    left_words = []
    with open(r'F:\mypython\final_subject\bbdw\exclude_doc_single_word_out\connected_graph_exclude_doc.txt', 'rb') as f:
        g = pickle.load(f)
        for node in g.nodes:
            left_words.append(node.name)

    # Load all edges between documents and words (including isolated nodes)
    with open(r'F:\mypython\final_subject\bbdw\out\doc_word_edge_list.txt', 'rb') as f:
        doc_word_edges = pickle.load(f)

    # Map each document name (edge.node1) to the list of word names it connects to
    doc_word_dict = {}
    for edge in doc_word_edges:
        if doc_word_dict.get(edge.node1.name) is None:
            doc_word_dict[edge.node1.name] = [edge.node2.name]
        else:
            doc_word_dict[edge.node1.name].append(edge.node2.name)

    #     Build the reverse dict: node index -> node name
    index_name_dict = {}
    with open(r'F:\mypython\final_subject\DWGI\data\index', 'rb') as f:
        remove_single_word_index = pickle.load(f)
        # e.g. {'WWII era': 3972, 'Hitler': 3973, ...} (name -> index)

        for (k, v) in remove_single_word_index.items():
            index_name_dict[v] = k

    # Collect all document node names, e.g. ['0', '1', ..., '4303']
    total_doc = []
    with open(r'F:\mypython\final_subject\bbdw\out\connected_graph.txt', 'rb') as f:
        graph = pickle.load(f)
        for node in graph.nodes:
            if node.type == 'd':
                total_doc.append(node.name)

    print(len(total_doc))

    doc_subject_res = {}
    for doc in total_doc:
        # If the document is not in remove_single_word_index, use its directly
        # connected words as its subject words
        if remove_single_word_index.get(doc) is None:
            doc_subject_res[doc] = doc_word_dict[doc]
        else:
            # Otherwise, first take isolated words (not in the connected graph)
            # as subject words, then rank the remaining candidates by similarity
            direct_words = doc_word_dict[doc]
            for word in direct_words:
                # NOTE(review): `word not in left_words` is an O(n) list scan per
                # word; a set would be faster if this becomes a bottleneck.
                if word not in left_words:
                    if doc_subject_res.get(doc) is None:
                        doc_subject_res[doc] = [word]
                    else:
                        doc_subject_res[doc].append(word)
            #         Rank candidates by embedding similarity
            doc_index = remove_single_word_index[doc]
            #         Get the list of subgraphs this document belongs to
            doc_sub_graph_list = process.get_doc_sub_graph(doc_index, doc_word_edge, node_subgraph)
            #         Candidate word indices drawn from those subgraphs
            host_words_index = []
            for subgraph in doc_sub_graph_list:
                host_words_index.extend(subgraph_node[subgraph])

            word_sim_dict = {}
            #         Cosine similarity between the doc embedding and each candidate
            for word_index in host_words_index:
                sim = sim2(embeds[doc_index], embeds[word_index])
                word_sim_dict[index_name_dict[word_index]] = sim

            #         Sort candidates by similarity, descending
            sort_dict_res = sorted(word_sim_dict.items(), key=lambda item: item[1], reverse=True)
            append_word = []
            for word_sir in sort_dict_res:
                append_word.append(word_sir[0])
            # print(append_word)
            # Append ranked words after any isolated words collected above
            if doc_subject_res.get(doc) is None:
                doc_subject_res[doc] = append_word
            else:
                doc_subject_res[doc].extend(append_word)
            print(doc_subject_res[doc])

    # Persist the final {doc_name: [subject words]} mapping
    with open(r'F:\mypython\final_subject\DWGI\doc_subject_res.txt', 'wb') as f:
        pickle.dump(doc_subject_res, f)

    # for doc_index in range(doc_index_min, doc_index_max+1):
    #     doc_emb = embeds[doc_index]
    #     doc_sub_graphs = process.get_doc_sub_graph(doc_index, doc_word_edge, node_subgraph)
    #     for sub_graph_index in doc_sub_graphs:
    #         # list of word indices under each subgraph
    #         words = subgraph_node[sub_graph_index]
    #         for word in words:
    #             print(word)

if __name__ == '__main__':
    # Script entry point: run the full subject-word prediction pipeline.
    predict_subject()
