#! -*- coding:utf-8 -*-
# Segments the raw corpus (jieba), trains/loads a word2vec model, and serves
# keyword-similarity graph data plus Elasticsearch-backed detail lists.
import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
from django.http import HttpResponse
from gensim.models import word2vec
import gensim
import logging
import jieba
import os
import codecs
import requests
import json
from elasticsearch import Elasticsearch
import math
import multiprocessing

def cut_txt(old_file):
    """Segment the text in *old_file* with jieba and write the space-joined
    tokens to ``<old_file>_cut.txt`` (used later as word2vec training corpus).

    The source file must already be saved as UTF-8.
    Returns the path of the generated cut file.
    """
    cut_file = old_file + '_cut.txt'  # output path for the segmented corpus

    # BUG FIX: the original opened files without ever closing them and, on an
    # open failure, printed the error and then crashed on an undefined handle.
    # 'with' guarantees both files are closed and lets a real IOError surface.
    with codecs.open(old_file, 'r', encoding='utf-8') as fi:
        text = fi.read()

    # Precise mode (cut_all=False): jieba picks the most probable segmentation.
    new_text = jieba.cut(text, cut_all=False)

    with codecs.open(cut_file, 'w', encoding='utf-8') as fo:
        fo.write(' '.join(new_text))

    return cut_file


def model_train(train_file_name, save_model_file):
    """Train a word2vec model from a pre-segmented corpus file.

    train_file_name -- path of the space-separated (already cut) corpus
    save_model_file -- path under which the gensim model object is saved
    """
    # Fixed name for the extra binary export (written to the current dir).
    save_model_name = 'word2vec1.model'
    # Emit training progress to the log.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    # Text8Corpus streams the corpus file as whitespace-separated sentences.
    corpus = word2vec.Text8Corpus(train_file_name)
    # 200-dim vectors, keep every word, window narrowed from the default 5 to 3.
    # NOTE(review): the original comment claimed "skip-gram", but gensim's
    # default sg=0 trains CBOW — confirm which was intended.
    model = gensim.models.Word2Vec(corpus, size=200, min_count=1, window=3)
    model.save(save_model_file)
    # Also export raw word vectors in binary word2vec format for reuse.
    model.wv.save_word2vec_format(save_model_name + ".bin", binary=True)



def getModel():
    """Return the trained word2vec model, training it first on the
    pre-cut corpus if no saved model file exists yet."""
    # Pre-segmented corpus used as training input.
    cut_file = 'scatterModel/cutWord.txt'
    # Where the trained model is persisted between requests.
    save_model_name = 'scatterModel/word2vec1.model'

    # Train exactly once; subsequent calls just load the saved model.
    if not os.path.exists(save_model_name):
        model_train(cut_file, save_model_name)

    return word2vec.Word2Vec.load(save_model_name)

def insertDataArray(allKeyWordArray, data):
    """Append each item of *data* that is not already present to
    *allKeyWordArray* (mutated in place) and return that same list.

    Checking against the growing list also de-duplicates within *data*.
    """
    for item in data:
        if item in allKeyWordArray:
            continue
        allKeyWordArray.append(item)
    return allKeyWordArray

def getCategory(allNodes, currentKey):
    """Return the 'category' of the first node whose 'name' equals
    *currentKey*, or None when no node matches."""
    return next(
        (node["category"] for node in allNodes if node["name"] == currentKey),
        None,
    )

def calLoginData(allLoginData, allKeyWordArray=None, tempData=None, currentKey='', allNodes=None, num=1):
    """Merge one round of words similar to *currentKey* into the
    accumulated graph structures.

    allLoginData    -- list of {"source","target","name"} edge dicts (mutated)
    allKeyWordArray -- ordered keyword list; a word's index is its node id (mutated)
    tempData        -- words similar to *currentKey* to merge in
    currentKey      -- the keyword these similarities belong to
    allNodes        -- node dicts for the graph (mutated)
    num             -- round counter; shrinks the new node's 'value'

    Returns the (mutated) allLoginData, allKeyWordArray, allNodes.
    """
    # BUG FIX: the original used mutable default arguments ([]) which are
    # shared across calls and leak state between requests.
    if allKeyWordArray is None:
        allKeyWordArray = []
    if tempData is None:
        tempData = []
    if allNodes is None:
        allNodes = []

    for word in tempData:
        if word in allKeyWordArray:
            src = allKeyWordArray.index(word)
            tgt = allKeyWordArray.index(currentKey)
            # Skip if an edge between the two nodes already exists in either
            # direction (the original scanned two reversed copies for this).
            exists = any(
                (edge["source"] == src and edge["target"] == tgt)
                or (edge["target"] == src and edge["source"] == tgt)
                for edge in allLoginData
            )
            if not exists:
                allLoginData.append({"source": src, "target": tgt, "name": "1.0"})
        else:
            # New word: register it, link it to currentKey, and emit a node
            # whose category is inherited from currentKey's node.
            allKeyWordArray.append(word)
            src = allKeyWordArray.index(word)
            tgt = allKeyWordArray.index(currentKey)
            allLoginData.append({"source": src, "target": tgt, "name": "1.0"})
            allNodes.append({
                "id": src,
                "name": word,
                "category": getCategory(allNodes, currentKey),
                "value": 1.5 - (num * 0.2),  # deeper rounds get smaller nodes
            })

    return allLoginData, allKeyWordArray, allNodes


def toAlTeamList(data):
    """Wrap each keyword in a {"name": ...} dict, converting the internal
    '#' separator back into a space for display."""
    return [{"name": keyword.replace("#", " ")} for keyword in data]

    # worker for the (currently commented-out) multiprocessing fan-out
def getAllSimidataProcess(keyword, model_1):
    """Return the 5 words most similar to *keyword* according to the
    given word2vec model."""
    return model_1.most_similar(keyword, topn=5)

# normalise the raw keyWord parameter into a list of '#'-joined keywords
def handle(strs):
    """Split a comma-separated keyword string into a list, replacing the
    spaces inside each keyword with '#' (the internal separator)."""
    return [part.replace(" ", "#") for part in strs.split(",")]

def index(request):
    """Django view: for the comma-separated ``keyWord`` POST parameter,
    build word2vec similarity graph data (edges, nodes, similarity rows)
    and return it as JSON."""
    result = {
        "alTeam": [],
        "similarySource": "",
        "nodeList": "",
        "alTeamList": "",
        "similarityArray": ""
    }
    # Load (training on first use) the word2vec model.
    model_1 = getModel()

    keyWords = request.POST.get("keyWord", "world")

    # Normalised keyword lists: spaces inside a keyword become '#'.
    trainArray = handle(keyWords)
    inintall = handle(keyWords)

    notMoreKeys = trainArray

    result["alTeam"] = [dd.replace("#", " ") for dd in trainArray]
    result["alTeamList"] = toAlTeamList(inintall)

    allSimidata = []

    # Keywords for which the model could not produce similar words,
    # mapped to their index (used as the category id later on).
    notMoreFive = {}

    for dd in trainArray:
        # BUG FIX: y2 was initialised once OUTSIDE the loop, so when
        # most_similar() failed for a keyword the PREVIOUS keyword's
        # results were appended again. Reset it per iteration so a failed
        # keyword contributes an empty result (keeping allSimidata aligned
        # with trainArray, which the downstream builders index by position).
        y2 = []
        try:
            y2 = model_1.most_similar(dd, topn=5)  # 5 most similar words
        except Exception:  # narrowed from a bare except
            notMoreFive[dd] = notMoreKeys.index(dd)

        allSimidata.append(y2)

    similarySource, nodeList, similarityArray, tempKeyWord = operation(trainArray, allSimidata)

    # Add self-loop edges / placeholder rows for keywords with no results.
    similarySource, similarityArray = formNotMoreThree(notMoreFive, similarySource, similarityArray, tempKeyWord)

    result["similarySource"] = similarySource
    result["nodeList"] = nodeList
    result["similarityArray"] = similarityArray

    return HttpResponse(json.dumps(result), content_type="application/json")
def formNotMoreThree(notMoreFive, similarySource, similarityArray, tempKeyWord):
    """For every keyword the model produced no similarities for, append a
    self-loop edge and a placeholder similarity row so the keyword still
    appears in the graph.

    notMoreFive -- {keyword: category index} of failed keywords
    Returns the (mutated) similarySource and similarityArray lists.
    """
    for keyWord, category in notMoreFive.items():
        idx = tempKeyWord.index(keyWord)
        # Self-referencing edge keeps the isolated keyword visible.
        similarySource.append({
            "source": idx,
            "target": idx,
            "name": "1.0"
        })
        # Placeholder row with a fixed correlation value.
        similarityArray.append({
            "category": category,
            "categoryName": keyWord,
            "correlation": 0.7113348245620728,
            "word1": "",
            "word2": ""
        })
    return similarySource, similarityArray
        # collect every keyword (seeds + similar words)
def getAllKeyWord(trainArray, data):
    """Extend *trainArray* IN PLACE with every similar word found in
    *data* (per-seed lists of (word, score) pairs), skipping duplicates.

    NOTE: the returned list is the SAME object as *trainArray*; callers
    (see operation()) rely on this aliasing, so no copy is made.
    """
    allData = trainArray
    for resultList in data:
        for pair in resultList:
            word = pair[0]
            if word not in allData:
                allData.append(word)
    return allData

def getSimilarySource(data, allKeyWord):
    """Build the edge list: seed keyword i links to each of its similar
    words, with the target resolved to the word's index in *allKeyWord*."""
    links = []
    for idx, resultList in enumerate(data):
        for pair in resultList:
            links.append({
                "source": idx,                         # the seed keyword
                "target": allKeyWord.index(pair[0]),   # the similar word
                "name": "1.0"
            })
    return links

def getNodeList(allKeyWord, data):
    """Build the graph nodes: one per seed keyword (category = its own
    index, value 1.25), plus one per unique similar word (category = the
    seed it first appeared under, value 1).

    allKeyWord -- full keyword list; a word's index here is its node id.
                  Only the first len(data) entries are seeds.
    data       -- per-seed lists of (word, score) pairs.
    """
    # FIX: removed the unused local 'similarityArray' the original declared.
    nodeList = []
    seen = []  # words already emitted, to avoid duplicate nodes

    # Seed keywords first: each is its own category.
    for i in range(len(data)):
        nodeList.append({
            "id": i,
            "name": allKeyWord[i].replace("#", " "),
            "category": i,
            "value": 1.25
        })
        seen.append(allKeyWord[i])

    # Similar words, categorised under the seed that produced them.
    for i in range(len(data)):
        for pair in data[i]:
            word = pair[0]
            if word in seen:  # skip duplicates across seeds
                continue
            seen.append(word)
            nodeList.append({
                "id": allKeyWord.index(word),
                "name": word.replace("#", " "),
                "category": i,
                "value": 1
            })

    return nodeList

def getSimilarityArray(trainArray, data):
    """Flatten the per-seed similarity results into table rows: one row
    per (seed keyword, similar word) pair, with '#' shown as a space."""
    rows = []
    for i, resultList in enumerate(data):
        seed = trainArray[i].replace("#", " ")
        for word, score in resultList:
            rows.append({
                'category': i,
                'categoryName': seed,
                'correlation': score,
                'word1': seed,
                'word2': word.replace("#", " ")
            })
    return rows

# assemble the cluster-graph payload from the raw similarity results
def operation(trainArray, data):
    """Assemble all graph structures from the raw similarity results.

    Returns (edges, nodes, similarity rows, full keyword list).

    NOTE: getAllKeyWord() extends *trainArray* in place, so by the time
    getNodeList()/getSimilarityArray() receive trainArray it already
    contains every keyword — the builders depend on that aliasing.
    """
    allKeyWord = getAllKeyWord(trainArray, data)
    tempKeyWord = allKeyWord  # alias, not a copy

    similarySource = getSimilarySource(data, allKeyWord)
    nodeList = getNodeList(trainArray, data)
    similarityArray = getSimilarityArray(trainArray, data)

    return similarySource, nodeList, similarityArray, tempKeyWord


def esConnect():
    """Create an Elasticsearch client for the fixed cluster host."""
    # NOTE(review): hard-coded host/port and no authentication — confirm
    # this matches the deployment environment.
    return Elasticsearch(
        ['124.237.78.13'],
        port=9200
    )

def getDataFromEs(query):
    """Run *query* against the 'group_data' index (doc type 'wos_ture')
    and return the raw Elasticsearch search response."""
    client = esConnect()
    return client.search(index="group_data", doc_type="wos_ture", body=query)

def formDataFromEs(data):
    """Extract the '_source' documents from a raw Elasticsearch search
    response."""
    return [hit["_source"] for hit in data["hits"]["hits"]]

def getDataByKeyWord(keyword):
    # Unimplemented placeholder; detailList() builds its query inline instead.
    pass


def detailList(request):
    """Django view: paginated Elasticsearch search on the 'de' field for
    the POSTed ``keyWord``; returns the matching documents plus paging
    info as JSON.

    POST params: keyWord (search term), page (1-based, default 1),
    limit (page size, default 10).
    """
    result = {
        "bookList": [],
        "pageInfo": {
            "allNum": "",
            "currentPage": "",
            "allPage": ""
        }
    }

    keyWord = request.POST.get("keyWord", "")
    page = int(request.POST.get("page", 1))
    limit = int(request.POST.get("limit", 10))

    # Match on the 'de' (keyword) field with from/size pagination.
    query = {'query': {'match': {'de': keyWord}}, "from": (page - 1) * limit, "size": limit}
    data = getDataFromEs(query)

    # Best effort: an unexpected response shape leaves the defaults in
    # place. FIX: narrowed from bare except so real programming errors
    # (and KeyboardInterrupt/SystemExit) still surface.
    try:
        result["bookList"] = formDataFromEs(data)
    except (KeyError, TypeError):
        pass
    try:
        # NOTE(review): on Elasticsearch >= 7 hits.total is a dict
        # ({"value": ...}); this arithmetic assumes the older integer
        # form — confirm against the cluster version.
        result["pageInfo"]["allNum"] = data["hits"]["total"]
        result["pageInfo"]["currentPage"] = page
        result["pageInfo"]["allPage"] = math.ceil(data["hits"]["total"] / limit)
    except (KeyError, TypeError):
        pass

    return HttpResponse(json.dumps(result), content_type="application/json")

