import os

from django.shortcuts import render, redirect
from gensim.models import LdaModel
from gensim.corpora import Dictionary
from gensim import models
import numpy as np
import jieba
import networkx as nx
import matplotlib.pyplot as plt
import re
from pyhanlp import HanLP
from ExtractText import models as djmodels
from pathlib import Path


class FinanceText:
    """Extract a keyword-biased summary from financial text.

    Pipeline: segment the input with jieba, train a per-user LDA model,
    score candidate sub-sentences by topic similarity to the user's
    preference, then shorten the selected sentences by pruning their
    dependency-parse graph.
    """

    def __init__(self, username, text, preference, modelpath):
        """
        :param username: owner of the per-user LDA model file
        :param text: iterable of raw text lines to summarize
        :param preference: user keyword string used as the similarity anchor
        :param modelpath: filesystem path for saving/loading the LDA model
        """
        self.preference = preference
        self.username = username
        self.modelpath = modelpath
        self.text = text
        self.stopWords = self.getStopWords()
        self.dictionary, self.train = self.get_dict()

    def getStopWords(self):
        """Load the stop-word list, one word per line.

        :return: set of stop words
        """
        with open('static/files/stopwords.txt', encoding='utf8', mode='r') as file:
            return {line.rstrip() for line in file}

    def get_dict(self):
        """Segment each input line and build a gensim Dictionary over it.

        :return: (dictionary, train) where train holds one
            stop-word-filtered token list per input line
        """
        train = [
            [w for w in jieba.cut(line) if w not in self.stopWords]
            for line in self.text
        ]
        return Dictionary(train), train

    def train_model(self):
        """Train a 7-topic LDA model on the input and save it to modelpath."""
        corpus = [self.dictionary.doc2bow(text) for text in self.train]
        lda = LdaModel(corpus=corpus, id2word=self.dictionary, num_topics=7)
        try:
            lda.save(self.modelpath)
        except Exception as e:
            # Best-effort save: a failed save only breaks lda_sim later.
            print(e)

    def lda_sim(self, s1):
        """Cosine similarity between the topic distributions of *s1* and
        the user preference. Larger means more similar.

        :param s1: text to compare against self.preference
        :return: similarity in [0, 1]; 0 when either distribution is degenerate
        """
        lda = models.ldamodel.LdaModel.load(self.modelpath)
        # minimum_probability=0 keeps every topic so both vectors always
        # have the same length. The previous `lda[doc_bow]` dropped
        # low-probability topics, so the dot product often raised
        # ValueError on mismatched lengths and the similarity silently
        # collapsed to 0.
        doc_bow = self.dictionary.doc2bow(list(jieba.cut(s1)))
        list_doc1 = [p for _, p in lda.get_document_topics(doc_bow, minimum_probability=0)]
        doc_bow2 = self.dictionary.doc2bow(list(jieba.cut(self.preference)))
        list_doc2 = [p for _, p in lda.get_document_topics(doc_bow2, minimum_probability=0)]
        denom = np.linalg.norm(list_doc1) * np.linalg.norm(list_doc2)
        if denom == 0:
            # A zero vector has no direction: report no similarity, not nan.
            return 0
        return np.dot(list_doc1, list_doc2) / denom

    def hanlp_split(self, text):
        """Parse HanLP's CoNLL-style dependency output.

        :param text: ``toString()`` of a HanLP dependency parse — one
            tab-separated token per line, terminated by a newline
        :return: (textProcessed, relationGraph): a list of
            [id, word, POS, head-id, relation] rows and a list of
            (id, head-id) edge tuples
        """
        textSplit = text.split('\n')
        # Drop the empty element produced by the trailing newline.
        textSplit.pop(-1)
        textProcessed = []
        relationGraph = []
        for t in textSplit:
            tmp = t.split('\t')
            # CoNLL columns: 0=id, 1=form, 3=coarse POS, 6=head, 7=deprel.
            textProcessed.append([tmp[0], tmp[1], tmp[3], tmp[6], tmp[7]])
            relationGraph.append((tmp[0], tmp[6]))

        return textProcessed, relationGraph

    def showGraph(self, graph):
        """Build the directed word-relation graph of a sentence.

        :param graph: list of (node-id, head-id) string edges, one per word
        :return: networkx DiGraph with nodes '0'..'len(graph)' ('0' is the
            parse root) and the given edges
        """
        graphic = nx.DiGraph()
        for node in range(len(graph) + 1):
            graphic.add_node(str(node))
        graphic.add_edges_from(graph)

        return graphic

    def removeGraphNode(self, graphic):
        """Prune the graph: remove nodes with in-degree 0 and out-degree 1
        (leaf modifiers) as well as fully isolated nodes.

        :param graphic: networkx DiGraph to prune (left unmodified)
        :return: a pruned copy of the graph
        """
        gCopy = graphic.copy()
        gIn = gCopy.in_degree()
        gOut = gCopy.out_degree()
        # in==0, out==1  -> dangling modifier; in==0, out==0 -> isolated.
        nodeRemove = [n for n in gCopy.nodes() if gIn[n] == 0 and gOut[n] <= 1]
        gCopy.remove_nodes_from(nodeRemove)

        return gCopy

    def completeSentence(self, textStructure, nodes):
        """Rebuild a sentence from the words whose ids survived pruning.

        :param textStructure: list of [id, word, POS, head-id, relation]
        :param nodes: container of surviving node ids (strings)
        :return: the concatenated sentence
        """
        return ''.join(text[1] for text in textStructure if str(text[0]) in nodes)


# Create your views here.

def extract_text(request):
    """Summarize POSTed financial text, biased toward the given keywords.

    Requires a logged-in session (otherwise redirects to /login). On POST
    with both fields present: trains a per-user LDA model, keeps the two
    highest-scoring sub-sentences of each input line, compresses each via
    dependency-graph pruning, records the run in ExtractHistory, and
    renders the result. Any other case renders the empty form.
    """
    if not request.session.get('is_login', None):
        return redirect('/login')

    if request.method == "POST":
        keywords = request.POST.get('keywords')
        input_text_origin = request.POST.get('inputBox')
        input_text = str(input_text_origin).split('\n')
        username = request.session['user_name']
        path = r'UserFile/ExtractTextModels/' + username + '/test_lda.model'
        user_dir = Path('UserFile/ExtractTextModels/' + username)
        # parents/exist_ok: plain os.mkdir crashed when the parent
        # directory was missing or the directory already existed.
        user_dir.mkdir(parents=True, exist_ok=True)
        if input_text and keywords:
            financeText = FinanceText(username, input_text, keywords, path)
            financeText.train_model()

            finalSentence = ''
            for line in financeText.text:
                # Strip spaces and assorted punctuation before segmentation.
                line = re.sub(" ", "", line)
                line = re.sub("[].\\/_,$%^*(+\"\')]+|[+——（）【】、~@#￥%……&*（）“”《》]+", "", line)
                topic_words = jieba.lcut(line)
                # Per-line word-frequency table.
                dic = {}
                for word in topic_words:
                    dic[word] = dic.get(word, 0) + 1
                # Split the line into candidate sub-sentences.
                pattern = r'\.|/|;|\'|`|\[|\]|<|>|\?|:|"|\{|\}|\~|!|@|#|\$|%|\^|&|\(|\)|-|=|\_|\+|。|；|‘|’|【|】|·|！| |…|（|）'
                topic_sentence = re.split(pattern, line)

                # Raw score: sum of word frequencies in each sub-sentence.
                res = []
                for sentence in topic_sentence:
                    res.append(sum(dic.get(word, 0) for word in jieba.lcut(sentence)))

                # Weight the length-normalized frequency score by topic
                # similarity to the user keywords. The last re.split element
                # is the empty tail after trailing punctuation, hence len-1.
                score_dic = {}
                for i in range(len(topic_sentence) - 1):
                    # Compare the sub-sentence itself to the preference.
                    # (Previously topic_words[i] — the line's word list
                    # indexed by *sentence* position — which scored an
                    # unrelated single word and could run out of range.)
                    temp_similarity = financeText.lda_sim(topic_sentence[i])
                    score_dic[i] = (res[i] / (len(jieba.lcut(topic_sentence[i])) + 1)) * abs(temp_similarity)

                # Keep the two best-scoring sub-sentences of this line.
                result = sorted(score_dic.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
                for key in result[:2]:
                    # Dependency-parse with HanLP, turn the parse into a
                    # relation graph, prune it, and rebuild a shorter sentence.
                    ans = HanLP.parseDependency(topic_sentence[key[0]].strip())
                    textStructure, graph = financeText.hanlp_split(ans.toString())
                    graphic = financeText.showGraph(graph)
                    graphCut = financeText.removeGraphNode(graphic)
                    sentence = financeText.completeSentence(textStructure, graphCut)
                    finalSentence += sentence + '。'

            # Persist this extraction run for the user's history page.
            new_history = djmodels.ExtractHistory.objects.create()
            new_history.username = username
            new_history.origin = input_text
            new_history.result = finalSentence
            new_history.keywords = keywords
            new_history.save()

            return render(request, 'ExtractText/extract.html',
                          {"finalSentence": finalSentence, "inputText": input_text_origin,
                           "keyWords": keywords})

    return render(request, 'ExtractText/extract.html', {"keyWords": "Key Words"})
