# Consider only the subgraph formed by edges between word nodes; recursively
# shrink oversized connected components, then re-attach the edge weights from
# the original graph (document nodes excluded), and finally save the result.

import networkx as nx
import pickle
from gensim.models import KeyedVectors
import numpy as np
import sys
from bbdw.my_graph import Node, Edge, Graph


def sim2(vector1, vector2):
    """Return the cosine similarity of two vectors.

    Args:
        vector1: first vector (any 1-D numeric sequence or array).
        vector2: second vector, same length as vector1.

    Returns:
        Cosine similarity as a float in [-1, 1].

    Note: raises/returs NaN-or-error behavior on zero-norm input is
    unchanged from the original (division by zero is not guarded).
    """
    # np.mat is deprecated in modern NumPy; use plain arrays with dot().
    vec_a = np.asarray(vector1, dtype=float).ravel()
    vec_b = np.asarray(vector2, dtype=float).ravel()
    denom = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    return float(np.dot(vec_a, vec_b) / denom)


def sub_graph(node_list, edge_list, wv, sub_nodes, sub_edges):
    """Recursively split a word graph into connected components of <= 200 nodes.

    Components larger than 200 nodes are thinned by dropping edges whose
    endpoint word vectors have cosine similarity below 0.75, then re-split
    recursively.

    Args:
        node_list: iterable of word names (graph nodes).
        edge_list: iterable of (word1, word2) pairs.
        wv: mapping from word name to its vector (e.g. gensim KeyedVectors).
        sub_nodes: output list; node views of accepted components are appended.
        sub_edges: output list; edge views of accepted components are appended.

    Returns:
        (sub_nodes, sub_edges) — the same lists passed in, mutated in place.
    """
    G = nx.Graph()
    G.add_nodes_from(node_list)
    G.add_edges_from(edge_list)

    for component in nx.connected_components(G):
        # Build the subgraph view once instead of twice per component.
        sub = G.subgraph(component)
        nodeSet = sub.nodes
        edgeSet = sub.edges

        # Oversized component: thin out weakly-similar edges and recurse.
        if len(nodeSet) > 200:
            new_edgeSet = [e for e in edgeSet
                           if sim2(wv[e[0]], wv[e[1]]) >= 0.75]

            # Bug fix: if filtering removed no edges, recursing would repeat
            # on the identical component forever (until RecursionError).
            # Accept the component as-is in that case.
            if len(new_edgeSet) == len(edgeSet):
                sub_nodes.append(nodeSet)
                sub_edges.append(edgeSet)
            else:
                sub_graph(nodeSet, new_edgeSet, wv, sub_nodes, sub_edges)
        else:
            sub_nodes.append(nodeSet)
            sub_edges.append(edgeSet)

    return sub_nodes, sub_edges


def extract_sub_graph():
    """Build and persist a word-only graph with reduced component sizes.

    Pipeline:
      1. Load word vectors and the pickled original graph / edge list.
      2. Split the word-word graph into components of <= 200 nodes
         (see sub_graph), thinning oversized ones by vector similarity.
      3. Rebuild Node/Edge objects for the surviving nodes and edges,
         re-attaching the original edge weights (0 if an edge pair is
         not found in the original list).
      4. Pickle the resulting Graph to exclude_doc_out.

    No parameters; no return value. Side effects: reads three pickle/model
    files, prints every new edge, writes one pickle file.
    """
    # The recursion in sub_graph can go deep on large graphs.
    sys.setrecursionlimit(1000000)

    wv = KeyedVectors.load(r'F:\model\ai-lab\ai_lab_model.wv', mmap='r')

    # Deserialize the word-word edge objects ONCE (the original code read
    # this file twice); keep the objects for the weight lookup below.
    with open(r'F:\mypython\final_subject\bbdw\out\word_word_edge_list.txt', 'rb') as f:
        word_word_edge_list = pickle.load(f)
    word_edge = [[e.node1.name, e.node2.name] for e in word_word_edge_list]

    # Load the original graph ONCE (also previously read twice) and collect
    # both the word-node names and a name -> original-node mapping.
    with open(r'F:\mypython\final_subject\bbdw\out\connected_graph.txt', 'rb') as f:
        graph = pickle.load(f)
    word_list = []
    old_name_node_dict = {}
    for node in graph.nodes:
        if node.type == 'w':
            word_list.append(node.name)
            old_name_node_dict[node.name] = node

    sub_nodes = []
    sub_edges = []
    nodes, edges = sub_graph(word_list, word_edge, wv, sub_nodes, sub_edges)

    # Flatten the per-component node/edge views.
    new_nodes = [node for node_view in nodes for node in node_view]
    # New edges are plain name pairs here, not Edge objects (no weight yet).
    new_edges = [edge for edge_view in edges for edge in edge_view]

    # Weight lookup table keyed on the UNORDERED name pair: O(1) per edge
    # instead of the original O(E) linear scan per edge (O(E^2) total).
    # setdefault keeps the FIRST occurrence, matching the original
    # first-match-then-break semantics.
    weight_dict = {}
    for edge_obj in word_word_edge_list:
        key = frozenset((edge_obj.node1.name, edge_obj.node2.name))
        weight_dict.setdefault(key, edge_obj.weight)

    # Rebuild Node objects for the surviving words, reusing original indices.
    new_nodes_object = []
    new_name_node_dict = {}
    for node_name in new_nodes:
        new_node = Node(node_name, 'w', old_name_node_dict[node_name].index, None)
        new_name_node_dict[node_name] = new_node
        new_nodes_object.append(new_node)

    # Rebuild Edge objects, wiring neighbor links and original weights.
    new_edges_object = []
    for node1_name, node2_name in new_edges:
        node1 = new_name_node_dict[node1_name]
        node2 = new_name_node_dict[node2_name]

        node1.add_neighbor(node2.name)
        node2.add_neighbor(node1.name)

        # Missing pairs default to weight 0, as before.
        weight = weight_dict.get(frozenset((node1_name, node2_name)), 0)

        # Canonical order: lexicographically smaller name first.
        if node1.name < node2.name:
            new_edge = Edge(node1, node2, weight)
        else:
            new_edge = Edge(node2, node1, weight)
        new_edges_object.append(new_edge)

    graph = Graph(new_nodes_object, new_edges_object)

    for edge in new_edges_object:
        print(edge.node1.name, edge.node2.name, edge.weight)

    # Serialize the reduced, word-only graph.
    with open(r'F:\mypython\final_subject\bbdw\exclude_doc_out\connected_graph.txt', 'wb') as f:
        pickle.dump(graph, f)


if __name__ == "__main__":
    extract_sub_graph()
