# -*- coding:UTF-8 -*
from bbdw.my_graph import Node, Edge, Graph
import pandas as pd
import numpy as np
import pickle
from gensim.models import KeyedVectors
import sys


def sim2(vector1, vector2):
    """Return the cosine similarity between two vectors.

    Parameters
    ----------
    vector1, vector2 : array-like of float
        1-D vectors of equal length (e.g. word-embedding vectors).

    Returns
    -------
    float
        dot(a, b) / (||a|| * ||b||). If either vector has zero norm the
        division follows NumPy float semantics (nan/inf with a warning),
        matching the original implementation.

    Note: the original used ``np.mat``, which is deprecated in NumPy;
    plain arrays with ``np.dot`` give the same result.
    """
    a = np.asarray(vector1, dtype=float).ravel()
    b = np.asarray(vector2, dtype=float).ravel()
    num = float(np.dot(a, b))
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return num / denom


# When the word has not been seen before: create its node and add an
# edge between the document and the word.
def set_new_node(word, type, doc_index, doc_node,
                 word_index_dict, word_node_dict,
                 tf_idf_weight, doc_word_edge_list,
                 word_nodes_list):
    """Create a Node for an unseen word and link it to the document node.

    Side effects: registers the node in ``word_node_dict`` and
    ``word_nodes_list``, appends a document-word Edge to
    ``doc_word_edge_list``, and records the adjacency on both nodes.

    Words missing from the tf-idf vocabulary get index -1 and a default
    edge weight of 0.1.
    """
    word_index = word_index_dict.get(word)

    if word_index is None:
        # Word is not in the tf-idf vocabulary: fall back to defaults.
        new_node = Node(word, type, -1, None)
        node_doc_edge_weight = 0.1
    else:
        new_node = Node(word, type, word_index, None)
        # Word indices start at 10000; subtract the offset to address
        # the tf-idf matrix column.
        node_doc_edge_weight = tf_idf_weight[doc_index][word_index - 10000]

    word_node_dict[word] = new_node

    doc_edge = Edge(doc_node, new_node, node_doc_edge_weight)  # document-word edge
    doc_word_edge_list.append(doc_edge)
    word_nodes_list.append(new_node)

    # Record the adjacency in both directions. The original skipped this
    # for out-of-vocabulary words, which left the neighbor lists
    # inconsistent with the edge list (and with set_old_node).
    doc_node.add_neighbor(new_node.name)
    new_node.add_neighbor(doc_node.name)


# When the word already has a node: add a new edge between the document
# and the word (unless one already exists).
def set_old_node(word, type, doc_index, doc_node, word_node_dict, doc_word_edge_list,
                 word_index_dict, tf_idf_weight):
    """Link an already-known word node to the given document node.

    Does nothing when a document-word edge for this pair already exists.
    Otherwise appends a new Edge weighted by the tf-idf value (or 0.1
    for words missing from the tf-idf vocabulary) and records the
    adjacency on both nodes.
    """
    node = word_node_dict[word]

    # Bail out early if this document is already connected to the word.
    already_linked = any(
        edge.node1.name == doc_node.name and edge.node2.name == word
        for edge in doc_word_edge_list
    )
    if already_linked:
        return

    idx = word_index_dict.get(word)
    # Indices are offset by 10000 relative to the tf-idf matrix columns;
    # unknown words get a small default weight.
    if idx is None:
        weight = 0.1
    else:
        weight = tf_idf_weight[doc_index][idx - 10000]

    doc_word_edge_list.append(Edge(doc_node, node, weight))  # document-word edge

    doc_node.add_neighbor(node.name)
    node.add_neighbor(doc_node.name)


def update_edge_weight(node1, node2, word_word_edge_list):
    """Increment the weight of the stored edge between two word nodes.

    Word-word edges are looked up with node names in ascending order, so
    the pair is sorted before searching. Only the first matching edge is
    incremented.
    """
    first, second = sorted((node1.name, node2.name))
    for edge in word_word_edge_list:
        if edge.node1.name == first and edge.node2.name == second:
            edge.weight += 1
            return


def process_two_word(word1, word2, doc_index, word_node_dict, node_doc,
                     wv, sim_threshold, word_word_edge_list,
                     word_index_dict, tf_idf_weight,
                     doc_word_edge_list, word_nodes_list):
    """Link both words to the document node, then connect the two words
    to each other when their embedding similarity clears the threshold.

    Side effects: may append to ``doc_word_edge_list``,
    ``word_word_edge_list`` and ``word_nodes_list``, and mutate the
    neighbor lists of the involved nodes.
    """
    # Attach each word to the document, creating the word node first if
    # it has never been seen. Handling word1 before word2 matters: when
    # word1 == word2 the second pass correctly takes the "old node" path.
    for word in (word1, word2):
        if word not in word_node_dict:
            set_new_node(word, 'w', doc_index, node_doc,
                         word_index_dict, word_node_dict,
                         tf_idf_weight, doc_word_edge_list,
                         word_nodes_list)
        else:
            set_old_node(word, 'w', doc_index, node_doc,
                         word_node_dict, doc_word_edge_list,
                         word_index_dict, tf_idf_weight)

    # A word-word edge only makes sense between two distinct words.
    if word1 == word2:
        return

    node1 = word_node_dict.get(word1)
    node2 = word_node_dict.get(word2)

    if node1.neighbors is None:
        node1.neighbors = []

    # If the pair is already connected, bump the edge weight exactly
    # once. (The original scanned the neighbor list without a break, so
    # duplicated neighbor entries would have incremented several times.)
    if word2 in node1.neighbors:
        update_edge_weight(node1, node2, word_word_edge_list)
        return

    # Otherwise create a new edge, but only when the two embeddings are
    # similar enough; words missing from the embedding vocabulary raise
    # KeyError and are skipped silently, as before.
    try:
        similarity = sim2(wv[word1], wv[word2])
    except KeyError:
        return

    if similarity >= sim_threshold:
        # Store the edge with node names in ascending order so that
        # update_edge_weight (which searches the sorted pair) can find
        # it later. The original appended Edge(node1, node2) regardless
        # of order — edges created with word1 > word2 could never have
        # their weight updated — and also built a second, reversed Edge
        # that was never stored (dead code, removed).
        low, high = (node1, node2) if word1 < word2 else (node2, node1)
        word_word_edge_list.append(Edge(low, high, 1))
        node1.add_neighbor(node2.name)
        node2.add_neighbor(node1.name)


def build(file):
    """Build a document/word graph from a tokenized corpus file and
    pickle the result.

    Parameters
    ----------
    file : str
        Path to a UTF-8 text file, one space-separated tokenized
        document per line.

    Side effects: loads the word2vec model and tf-idf data from
    hardcoded paths, writes the word-index dictionary to disk, and
    pickles the edge lists and the final Graph.
    """
    # Deeply-linked graphs make pickle recurse heavily; raise the limit.
    sys.setrecursionlimit(10000000)
    wv = KeyedVectors.load(r'F:\model\ai-lab\ai_lab_model.wv', mmap='r')

    with open(file, 'r', encoding='utf-8') as f:
        data = f.readlines()

    sim_threshold = 0.6
    window_size = 5

    print(len(data))
    word_node_dict = {}  # word -> Node hash table
    word_word_edge_list = []
    doc_word_edge_list = []
    doc_nodes_list = []
    word_nodes_list = []

    # Load the tf-idf vocabulary and the tf-idf weight matrix.
    word_total = np.load(r'F:\mypython\final_subject\bbdw\word.npz')['arr_0']
    print(word_total.shape)

    tf_idf_weight = np.load(r'F:\mypython\final_subject\bbdw\tf-idf-weight.npz')['arr_0']
    print(tf_idf_weight.shape)

    # Number every vocabulary word starting at 10000; downstream lookups
    # subtract this offset to index the tf-idf matrix columns.
    word_index_dict = {}
    word_index = 10000
    for word in word_total:
        word_index_dict[word] = word_index
        word_index += 1

    # Persist the word->index mapping, one "word index" pair per line.
    # (The original left this handle open and shadowed the `file`
    # parameter; `with` guarantees the data is flushed and closed.)
    with open(r'F:\mypython\final_subject\bbdw\word_index_dict.txt', 'w', encoding='utf-8') as dict_file:
        for k, v in word_index_dict.items():
            dict_file.write(str(k) + ' ' + str(v) + '\n')

    for i in range(len(data)):
        print(i)
        line = data[i].strip()

        # Every line gets a document node, even a blank one.
        node_doc = Node(str(i), 'd', i, None)
        doc_nodes_list.append(node_doc)

        # Blank lines contribute no words. (The original compared the
        # token *list* against '' — never true — so blank lines fell
        # through and produced a bogus '' word node.)
        if not line:
            continue

        sentence = line.split(' ')

        if len(sentence) == 1:
            # Single-word document: just link the word to the document.
            word_only = sentence[0]
            if word_only not in word_node_dict:
                set_new_node(word_only, 'w', i, node_doc, word_index_dict,
                             word_node_dict,
                             tf_idf_weight, doc_word_edge_list,
                             word_nodes_list)
            else:
                set_old_node(word_only, 'w', i, node_doc,
                             word_node_dict, doc_word_edge_list,
                             word_index_dict, tf_idf_weight)
        elif len(sentence) >= window_size:
            # Initial window [0, window_size): process all pairs in it.
            for l in range(window_size - 1):
                for r in range(l + 1, window_size):
                    process_two_word(sentence[l], sentence[r], i,
                                     word_node_dict, node_doc,
                                     wv, sim_threshold, word_word_edge_list,
                                     word_index_dict, tf_idf_weight,
                                     doc_word_edge_list, word_nodes_list)

            # Slide the window one word at a time; only the pairs that
            # end at the new rightmost word need processing.
            left = 1
            right = window_size
            while right != len(sentence):
                for pre in range(left, right):
                    process_two_word(sentence[pre], sentence[right], i,
                                     word_node_dict, node_doc,
                                     wv, sim_threshold, word_word_edge_list,
                                     word_index_dict, tf_idf_weight,
                                     doc_word_edge_list, word_nodes_list)
                left += 1
                right += 1
        else:
            # Document shorter than the window: pair every word with
            # every later word. (The original inner range started at l,
            # producing redundant self-pairs; start at l + 1.)
            for l in range(len(sentence) - 1):
                for r in range(l + 1, len(sentence)):
                    process_two_word(sentence[l], sentence[r], i,
                                     word_node_dict, node_doc,
                                     wv, sim_threshold, word_word_edge_list,
                                     word_index_dict, tf_idf_weight,
                                     doc_word_edge_list, word_nodes_list)

    print(len(doc_nodes_list))
    print(len(word_nodes_list))
    print(len(word_word_edge_list))
    print(len(doc_word_edge_list))

    print(word_word_edge_list)

    nodes_list = doc_nodes_list + word_nodes_list
    print(len(nodes_list), nodes_list)

    edge_list = word_word_edge_list + doc_word_edge_list

    print('================')
    print(len(edge_list))

    graph = Graph(nodes_list, edge_list)

    # Serialize the edge lists and the graph for downstream stages.
    with open(r'F:\mypython\final_subject\bbdw\out\word_word_edge_list.txt', 'wb') as f:
        pickle.dump(word_word_edge_list, f)

    with open(r'F:\mypython\final_subject\bbdw\out\doc_word_edge_list.txt', 'wb') as f:
        pickle.dump(doc_word_edge_list, f)

    with open(r'F:\mypython\final_subject\bbdw\out\connected_graph.txt', 'wb') as f:
        pickle.dump(graph, f)


if __name__ == "__main__":
    # Entry point: build the graph from the tokenized corpus file.
    build('new_file.txt')