# -*- coding: utf-8 -*-
"""
Created on Wed Jan  9 14:20:44 2019

@author: caixue1
"""
import gensim
from gensim.models import Word2Vec
import jieba
import Core.MongoDB as db
import numpy as np
import pandas as pd
import codecs
import Core.MySQLDB as MySQLDB

def Text_to_WordSegments_Batch(database):
    """Fetch the pre-segmented text of every article from the store.

    Parameters
    ----------
    database : object exposing Find(db, collection, filter) -> iterable of dicts

    Returns
    -------
    list
        The "segments" field of every document in Recommender.Article.
    """
    # NOTE(review): assumes every Article document carries a "segments"
    # key — confirm against the collection schema.
    # Empty query: pull the whole collection (no limit / no resume offset).
    query = {}  # renamed from `filter`, which shadowed the builtin
    articleDocs = database.Find("Recommender", "Article", query)
    return [articleDoc["segments"] for articleDoc in articleDocs]


# Load the vocabulary (one term per line) from a UTF-8 text file.
def GetVocabulary(pathfilename):
    """Return the list of terms in *pathfilename*, one term per line.

    BUGFIX: the original sliced `line[0:-1]`, which silently dropped the
    last character of the final line when the file had no trailing
    newline; rstrip('\\n') strips only the newline.  The redundant
    `f.close()` after the `with` block is gone too.
    """
    with open(pathfilename, encoding='utf-8') as f:     # e.g. "D:/vocabulary.txt"
        return [line.rstrip('\n') for line in f]


# Determine the best word2vec vector length for this corpus.
def BestVectorSize(vocabulary, context, start_size, end_size, step):
    """Train Word2Vec at each size in range(start_size, end_size, step)
    and measure the spread (std) of pairwise similarities between the
    vocabulary terms; a larger spread suggests the space separates the
    terms better.

    Prints (size, std, mean) per candidate, plots the result, and
    returns a DataFrame indexed by size with the std column.
    """
    effect = []
    for s in range(start_size, end_size, step):
        # CBOW (sg=0); min_count=1 keeps every corpus token in the model.
        model = Word2Vec(context, min_count=1, size=s, window=5, workers=4, sg=0)

        corr_list = []
        for i in range(len(vocabulary) - 1):
            term1 = vocabulary[i]
            for j in range(i + 1, len(vocabulary)):
                term2 = vocabulary[j]
                try:
                    corr_list.append(model.similarity(term1, term2))
                except KeyError:
                    # Term absent from the trained vocabulary — skip the
                    # pair.  (Was a bare `except:` that hid every error.)
                    continue

        std = np.std(corr_list)
        mean = np.mean(corr_list)
        print(s, std, mean)
        effect.append([s, std])

    effect_df = pd.DataFrame(effect, columns=['size', 'std']).set_index('size')
    effect_df.plot()
    return effect_df



# Best size found to be 300 — train at that size and persist the term vectors.
def SaveVocabularyVector(database, vocabulary, context):
    """Train Word2Vec (vector size 300) on *context* and upsert each
    vocabulary term's vector into Label.Tag_Vec.

    Returns the subset of *vocabulary* present in the trained model.

    Narrowed the original bare `except:` to KeyError on the model
    lookup only, so DB errors during upsert now propagate instead of
    being misreported as "notin".
    """
    model = Word2Vec(context, min_count=1, size=300, window=5, workers=4, sg=0)
    vocabulary_in = []
    for term in vocabulary:
        try:
            term_vec = model[term].astype(float)
        except KeyError:
            # term never appeared in the training corpus
            print(term, "notin")
            continue

        vocabulary_in.append(term)
        print(term, "in")

        vectorDoc = {"Term": term, "Vector": list(term_vec)}
        database.upsert("Label", "Tag_Vec", {"Term": vectorDoc["Term"]}, vectorDoc)

    return vocabulary_in


# Compute pairwise cosine similarity between vocabulary terms and store it.
def LoadCorrelation(database):
    """For every pair of terms in D:/vocabulary_in.txt, compute the
    cosine similarity of their stored vectors (Label.Tag_Vec) and
    upsert the result into Label.Correlation keyed "term1_term2".

    NOTE(review): the `i < 10400` skip is a resume point from a
    previously interrupted run — remove or parameterize before a
    fresh full run.
    """
    vocabulary_in = GetVocabulary(pathfilename="D:/vocabulary_in.txt")

    # term -> vector lookup loaded from mongo (typo `verctorByWord` fixed)
    vectorByWord = {}
    vectors = database.find("Label", 'Tag_Vec')
    for vectorDoc in vectors:
        vectorByWord[vectorDoc["Term"]] = np.array(vectorDoc["Vector"])

    for i in range(len(vocabulary_in) - 1):
        term1 = vocabulary_in[i]
        if len(term1) == 0:
            continue
        if i < 10400:  # resume point of an interrupted run
            continue

        term1_vec = vectorByWord[term1]
        term1_norm = np.linalg.norm(term1_vec)  # invariant — hoisted out of inner loop

        for j in range(i + 1, len(vocabulary_in)):
            term2 = vocabulary_in[j]
            if len(term2) == 0:
                continue

            term2_vec = vectorByWord[term2]
            # cosine similarity = dot(u, v) / (|u| * |v|)
            cos = np.dot(term1_vec, term2_vec) / (term1_norm * np.linalg.norm(term2_vec))
            print(i, term1, term2, cos)

            correlationDoc = {
                "Term1": term1,
                "Term2": term2,
                "Correlation": float(cos),
            }
            database.upsert("Label", "Correlation", {"Key": term1 + "_" + term2}, correlationDoc)


# Look up a term's nearest neighbours from the mongo correlation collection.
def GetTermSimilary_ByMonGo(database, term_list, topk):
    """For each term, return its topk most correlated entries.

    The term is matched on both sides (Term1/Term2) of Label.Correlation,
    all matches are sorted by correlation descending, and the topk best
    are kept.

    Returns
    -------
    dict
        term -> list of [key, correlation], best first.

    BUGFIX: the original executed `BestSimilarDoc['term'] = BestSimilarDoc`
    — it stored the dict into itself under the literal key 'term' and
    discarded the computed BestSimilar list (cf. the correct MySQL
    variant).  It also used the removed DataFrame `.ix` indexer and
    raised IndexError when fewer than topk rows matched.
    """
    BestSimilarDoc = {}
    for term in term_list:
        term_corr1 = database.find("Label", "Correlation", query={"Term1": term})
        term_corr2 = database.find("Label", "Correlation", query={"Term2": term})

        corrByTerm = []
        for corrDoc in term_corr1:
            corrByTerm.append([corrDoc["Key"], corrDoc["Correlation"]])
        for corrDoc in term_corr2:
            corrByTerm.append([corrDoc["Key"], corrDoc["Correlation"]])

        corrByTerm_df = pd.DataFrame(corrByTerm, columns=['key', 'corr'])
        corrByTerm_df = corrByTerm_df.set_index('key')
        corrByTerm_df = corrByTerm_df.sort_values(by=['corr'], ascending=False)

        BestSimilar = []
        keys = corrByTerm_df.index.tolist()
        # guard against fewer than topk matches
        for i in range(min(topk, len(corrByTerm_df))):
            corr = corrByTerm_df.iloc[i]['corr']  # .iloc replaces removed .ix
            BestSimilar.append([keys[i], corr])
            print(term, keys[i], corr)
        BestSimilarDoc[term] = BestSimilar

    return BestSimilarDoc


# Look up a term's nearest neighbours from the MySQL correlation table.
def GetTermSimilary_ByMySql(database, term_list, topk):
    """For each term, return its topk most correlated partner tags.

    The term is matched against both tag1 and tag2 of
    label.tag_correlation; all matches are sorted by correlation
    descending and the partner tag of each of the topk best rows is
    returned.

    Returns
    -------
    dict
        term -> list of partner tag names, best first.
    """
    BestSimilarDoc = {}
    for term in term_list:
        term_corr1 = database.Find("label", "tag_correlation", filter={"tag1": term})
        term_corr2 = database.Find("label", "tag_correlation", filter={"tag2": term})

        corrByTerm = []
        for corrDoc in term_corr1:
            corrByTerm.append([corrDoc["tag1"] + '_' + corrDoc["tag2"], corrDoc["correlation"]])
        for corrDoc in term_corr2:
            corrByTerm.append([corrDoc["tag1"] + '_' + corrDoc["tag2"], corrDoc["correlation"]])

        corrByTerm_df = pd.DataFrame(corrByTerm, columns=['key', 'corr'])
        corrByTerm_df = corrByTerm_df.sort_values(by=['corr'], ascending=False)
        corrByTerm_df.index = range(len(corrByTerm_df))

        BestSimilar = []
        # guard against fewer than topk rows (the original raised KeyError)
        for i in range(min(topk, len(corrByTerm_df))):
            # keys are "tag1_tag2"; split once with maxsplit=1.
            # NOTE(review): a tag1 containing '_' would still split wrongly.
            tag1, tag2 = corrByTerm_df.loc[i, 'key'].split('_', 1)
            BestSimilar.append(tag2 if tag1 == term else tag1)
        BestSimilarDoc[term] = BestSimilar

    return BestSimilarDoc

# Build a similar-tag cache via one raw SQL query per term (FindWithSQL).
def GetTermSimilary_FindWithSql(database, term_list, topk):
    """Return {term: [partner tags]} using one
    ORDER BY correlation DESC ... LIMIT topk query per term.

    SECURITY NOTE: the query is still assembled by string concatenation
    because FindWithSQL only accepts a raw SQL string.  Single quotes in
    the term are doubled as a minimal escape against broken/injected
    SQL; switch to a parameterized API when available.
    """
    cacheDoc = {}
    count = 0
    for term in term_list:
        count += 1
        print(count)  # progress indicator
        safe_term = term.replace("'", "''")  # minimal SQL quote escaping
        sqlCommand = ("SELECT * FROM label.tag_correlation where tag1='" + safe_term +
                      "' or tag2='" + safe_term +
                      "' order by correlation desc limit " + str(topk))
        BestSimilar = database.FindWithSQL('label', 'tag_correlation', sqlCommand)

        # keep the partner tag of each row (the side that is not `term`)
        similar_tag = []
        for BestSimilarDoc in BestSimilar:
            if BestSimilarDoc['tag1'] == term:
                similar_tag.append(BestSimilarDoc['tag2'])
            else:
                similar_tag.append(BestSimilarDoc['tag1'])

        cacheDoc[term] = similar_tag
    return cacheDoc


# Use an article's own tags to associate related stock/industry/fund tags.
def Article_Tag_Associate(database):
    """For every article, find industry tags whose similar-tag cache
    overlaps the article's tag list, and insert the associations into
    label.article_asset_tag.

    NOTE(review): the stock and fund branches are currently disabled
    (commented or quoted out) — only the industry association runs.
    """
    # Extract stock_list, industry_list, fund_list from the tag table
    # (class_id 174/176/183 appear to identify the tag categories —
    # confirm against the label.tag schema).
    tag_stock = database.Find('label', 'tag', filter={"class_id": 174})
    stock_list = []
    for stockDoc in tag_stock:
        stock_list.append(stockDoc['name'])

    tag_industry = database.Find('label', 'tag', filter={"class_id": 176})
    industry_list = []
    for industryDoc in tag_industry:
        industry_list.append(industryDoc['name'])

    tag_fund = database.Find('label', 'tag', filter={"class_id": 183})
    fund_list = []
    for fundDoc in tag_fund:
        fund_list.append(fundDoc['name'])
    print('list done')

    # Similar-tag caches (stock/fund caches currently disabled)
    #BestSimilarDoc_stock = GetTermSimilary_FindWithSql(database, term_list=stock_list, topk=10)
    #print('cache stock done')
    BestSimilarDoc_industry = GetTermSimilary_FindWithSql(database, term_list=industry_list, topk=10)
    print('cache industry done')
    #BestSimilarDoc_fund = GetTermSimilary_FindWithSql(database, term_list=fund_list, topk=10)
    #print('cache fund done')

    # Fetch all article ids
    articleId = database.Find('text', 'news')
    articleId_list = []
    for articleIdDoc in articleId:
        articleId_list.append(articleIdDoc['ID'])
    print('articleid done')

    count = 0
    for artid in articleId_list:
        count += 1
        if count < 2:
            # NOTE(review): skips the first article — looks like a resume
            # hack from an interrupted run; confirm before a fresh run.
            continue
        articleTag = database.Find('label', 'article_tag', filter={"article_id": artid})
        ariticleTag_list = []
        for articleTagDoc in articleTag:
            article_tag = articleTagDoc['tag']
            ariticleTag_list.append(article_tag)   # the article's own tag list

        print(ariticleTag_list)

        associate_tag = []
        # Compare against the similar-tag caches: a tag is associated when
        # its cached neighbours overlap the article's tag list.
        '''
        for stock in stock_list:
            if len(list(set(BestSimilarDoc_stock[stock]) & set(ariticleTag_list))) != 0:
                associate_tag.append([stock, 'Associate_stock'])
        '''
        for industry in industry_list:
            if len(list(set(BestSimilarDoc_industry[industry]) & set(ariticleTag_list))) != 0:
                associate_tag.append([industry, 'Associate_industry'])
        '''
        for fund in fund_list:
            if len(list(set(BestSimilarDoc_fund[fund]) & set(ariticleTag_list))) != 0:
                associate_tag.append([fund, 'Associate_fund'])
        '''
        print(associate_tag)

        # Persist one row per associated tag
        if len(associate_tag) != 0:
            for associtag in associate_tag:
                newDoc = {}
                newDoc["article_id"] = artid
                newDoc["tag"] = associtag[0]
                newDoc["tag_id"] = database.Find('label', 'tag', filter={"name": associtag[0]})[0]['id']
                newDoc["method"] = associtag[1]
                database.Insert("label", "article_asset_tag", document=newDoc)
                                #target={"article_id": article_id, "tag_id": tag_id},



#database = db.MongoDB("10.13.144.119", "27017")
#databaseSimulation = MySQLDB.MySQLDB("10.13.144.119", "3306", username="root", password="kirk2019")
#database.creatIndex("Label","Correlation","Term1")
#database.creatIndex("Label","Correlation","Term2")
#print('context start')
#context = Text_to_WordSegments_Batch(database)
#print('context done')
#vocabulary = GetVocabulary(pathfilename="D:/vocabulary0.txt")
#print('vocabulary done')
#effect_df = BestVectorSize(vocabulary, context, start_size=25, end_size=100, step=10)
#print(effect_df)
#effect_df.to_csv('D:/effect_size.csv', encoding='GBK')
#database.creatIndex("Label", "Correlation", "Key")
#BestSimilarDoc = GetTermSimilary_ByMySql(databaseSimulation, term='万达', topk=5)
#Article_Tag_Associate(databaseSimulation)

# Transfer the term correlations from the mongo store into the SQL database
import datetime

def TransitTagCorrelationSub(documents, tagidByName):
    """Convert mongo Correlation docs into MySQL tag_correlation rows.

    Parameters
    ----------
    documents : iterable of dicts with "Term1", "Term2", "Correlation"
    tagidByName : dict mapping tag name -> tag id

    Returns
    -------
    list of dict
        Rows ready for Insert_Many.  Pairs containing a term with no
        known tag id are printed and skipped.  Each pair is normalised
        so tag1 is the term with the GREATER tag id, giving every
        unordered pair one canonical orientation.
    """
    newDocuments = []
    for document in documents:
        term1 = document["Term1"]
        term2 = document["Term2"]

        # skip pairs containing an unknown term
        if term1 not in tagidByName:
            print(term1)
            continue
        if term2 not in tagidByName:
            print(term2)
            continue

        # canonical order: tag_id1 > tag_id2
        if tagidByName[term1] < tagidByName[term2]:
            term1, term2 = term2, term1

        newDocuments.append({
            "tag1": term1,
            "tag_id1": tagidByName[term1],
            "tag2": term2,
            "tag_id2": tagidByName[term2],
            "correlation": document["Correlation"],
        })

    return newDocuments


def TransitTagCorrelation(mongo, mysql, count=18000000, step=10000, resume_loop=1320):
    """Copy Label.Correlation docs from mongo into MySQL Label.Tag_Correlation.

    Pages through the mongo collection in `step`-sized batches, where
    `count` is an upper bound on the number of documents.  `resume_loop`
    skips already-transferred batches so an interrupted run can restart;
    the previously hard-coded values are kept as defaults for backward
    compatibility.
    """
    # Build the tag name -> id lookup from the MySQL tag table.
    classDoc = mysql.Find("Label", "Tag")
    tagidByName = {}
    for doc in classDoc:
        tagidByName[doc["name"]] = doc["id"]

    loops = int(count / step)
    for i in range(loops):
        if i < resume_loop:
            continue
        print(i, i * step, step, datetime.datetime.now())  # progress/timing log
        batch_filter = {"skip": i * step, "limit": step}   # renamed from `filter` (builtin)
        documents = mongo.Find("Label", "Correlation", batch_filter)
        newDocuments = TransitTagCorrelationSub(documents, tagidByName)
        mysql.Insert_Many("Label", "Tag_Correlation", newDocuments)


# Script entry point: run the mongo -> MySQL correlation transfer.
# Guarded so importing this module no longer opens DB connections as a
# side effect.
# NOTE(review): host and credentials are hard-coded — move to config/env.
if __name__ == "__main__":
    mongo = db.MongoDB("10.13.144.119", "27017")
    mysql = MySQLDB.MySQLDB("10.13.144.119", "3306", username="root", password="kirk2019")
    TransitTagCorrelation(mongo, mysql)








# ---------------------------------------------------------------------------
# NOTE(review): dead scratch/experiment code preserved inside a triple-quoted
# string (a no-op string-literal statement at runtime).  It mixes gensim
# word2vec/doc2vec exploration with a textrank4zh keyword/sentence example
# and contains Python 2 `print` statements — it cannot run under Python 3
# as written.  Kept for reference; consider deleting or moving to a notebook.
# ---------------------------------------------------------------------------
'''
fo = open("D:/afterSeg.txt","w")
fo.write(context)
print("finished!")
fo.close()
sentences0 = word2vec.Text8Corpus(u"D:/afterSeg.txt")
sentences = word2vec.LineSentence(u"D:/afterSeg.txt")

y1=model.similarity(u"基金业", u"基金") #计算两个词之间的余弦距离
print(y1)

y2 = model.most_similar("基金", topn=10)
for item in y2:
    print(item[0], item[1])

print(model.doesnt_match(u"分类 聚类 传入".split()))

tmp = model.vocab
print(len(tmp))

model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1)

model['分类']


documents = []
for i in range(len(train_data)):
    wordlist = train_data[i]
    documents.append(gensim.models.doc2vec.TaggedDocument(wordlist, [str(i)]))

model = gensim.models.Doc2Vec(documents,dm = 0, alpha=0.1, size= 20, min_alpha=0.025)

print(model.docvecs.most_similar('0'))

print(model.docvecs.similarity('0','1'))

print(model.docvecs['10'])

words = u"央行 宣布 降准"
print(model.infer_vector(words.split()))




from  textrank4zh import TextRank4Keyword,TextRank4Sentence
import codecs
 
file = r"D:\article_test.txt"
text = codecs.open(file,'r','utf-8').read()
 
word = TextRank4Keyword()
 
word.analyze(text,window = 2,lower = True)
w_list = word.get_keywords(num = 20,word_min_len = 1)
 
print '关键词:'
print
for w in w_list:
   print w.word,w.weight
print
phrase = word.get_keyphrases(keywords_num = 5,min_occur_num=2)
 
print '关键词组:'
print 
for p in phrase:
 print p
print
sentence = TextRank4Sentence()
 
sentence.analyze(text,lower = True)
s_list = sentence.get_key_sentences(num = 3,sentence_min_len = 5)
 
print '关键句:'
print
for s in s_list:
 print s.sentence,s.weight
print
'''



