# -*-coding: utf-8 -*-
import gensim
import difflib
import time
# import datetime
import re, collections
import sched
import unicodedata
import uuid

import numpy as np
import jieba
import jieba.posseg as pseg
import jieba.analyse
import codecs
from decimal import Decimal


# from nltk.corpus import brown
from numpy import linalg
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer


# Load the custom user dictionary so jieba recognizes domain-specific terms.
jieba.load_userdict("txt/userdict.txt")
 
# Relevance / similarity analysis

def loadWords(src="txt/lufax.txt", dst="txt/lufax_train.txt"):
    """Segment every line of *src* with cut_words() and write the result to *dst*.

    Each output line is the space-joined segmentation of the corresponding
    input line, followed by " \\n" (matching the original output format).

    :param src: path of the raw corpus file (default: original hard-coded path)
    :param dst: path of the segmented output file (default: original hard-coded path)

    Fixes over the original:
    - both file handles are closed deterministically via ``with``;
    - the final empty read at EOF is no longer written as a spurious line
      (the original wrote before testing for EOF).
    """
    with open(src, "r", encoding="UTF-8") as fin, \
         open(dst, "w", encoding="UTF-8") as fout:
        for line in fin:
            fout.write("%s \n" % cut_words(line))


def cut_words(text):
    """POS-tag and segment *text* with jieba, dropping noise tokens.

    A token is kept only if its POS flag is not in ``stop_flag`` (particles,
    punctuation, numerals, pronouns, ...), it is not a stopword from
    ``txt/stopWords.txt``, and it is longer than one character.

    :param text: raw text to segment
    :return: kept tokens joined with single spaces, each followed by a
             trailing space (e.g. ``"word1 word2 "``); empty string if
             nothing survives the filters.

    Fixes over the original:
    - the stopword file handle is closed via ``with`` (it leaked before);
    - stopwords/flags are sets for O(1) membership tests;
    - tokens are joined instead of quadratic ``+=`` concatenation.
    """
    with codecs.open('txt/stopWords.txt', 'r', encoding='utf8') as f:
        stopwords = {w.strip() for w in f}
    # POS flags to discard: punctuation, conjunctions, auxiliaries, adverbs,
    # prepositions, time words, numerals, directionals, pronouns, etc.
    stop_flag = {'x', 'c', 'u', 'd', 'p', 't', 'uj', 'm', 'f', 'r', 'ul'}
    kept = [word for word, flag in pseg.cut(text)
            if flag not in stop_flag and word not in stopwords and len(word) > 1]
    # Original format: every kept word followed by one space.
    return ''.join(word + ' ' for word in kept)

def getSentences(path):
    """Read a pre-segmented corpus file into sentences for word2vec training.

    :param path: path of the segmented corpus file (one sentence per line,
                 tokens separated by single spaces)
    :return: list of sentences, each a list of token strings, e.g.
             ``[["w1", "w2"], ["w3"], ...]``; blank lines are skipped.

    NOTE: splitting is on a single space (the original comment claimed two
    spaces, but the code always split on one). Runs of multiple spaces
    therefore yield empty-string tokens, matching the original behavior.

    Fixes over the original:
    - the file handle is closed via ``with`` (it leaked before);
    - ``line.split(" ")`` is appended directly instead of copying it
      element-by-element through an index loop.
    """
    sents = []
    with open(path, "r", encoding="utf8") as f:
        for line in f:
            line = line.strip()
            if line:
                sents.append(line.split(" "))
    return sents

# --- Top-level script: segment the corpus, train Word2Vec, query similar words ---
# Runs at import time: segments txt/lufax.txt, trains an embedding model on it,
# saves/reloads the model, then prints the 20 terms most similar to '陆金所'.
loadWords()
gmodelPath = "txt/lufax_train.txt"  # segmented corpus produced by loadWords()

print("正在训练......")
# Training step
gModelSentences = getSentences(gmodelPath)
# NOTE(review): `size`/`iter` are gensim < 4.0 keyword names
# (`vector_size`/`epochs` in gensim >= 4.0) — confirm the installed version.
gModelModel = gensim.models.Word2Vec(gModelSentences, min_count=1, size=100, workers=2, window=5, iter=1000)  # the gmodel model
gModelModel.save("txt/lufax_train")  # persist the trained model


# Reload the model from disk (round-trips through the saved file).
gModel_new_model = gensim.models.Word2Vec.load("txt/lufax_train")

# NOTE(review): in gensim >= 4.0 this becomes `model.wv.most_similar` — confirm.
gNameresult = gModel_new_model.most_similar(['陆金所'],topn=20)
print("gName模型--------------------")


# Accumulate a quoted, comma-separated name list and a comma-separated
# similarity list (each score rounded to 4 decimal places via Decimal).
allNames = '';
results = '';
for item in gNameresult:
    result = Decimal(item[1]).quantize(Decimal("0.0000"));
    name = item[0]
    print('' + name + '---相似度:' + str(result))
    allNames = allNames + '\'' + name + '\''+ ','
    results = results + str(result) + ',' 
print(allNames)
print(results)