# -*- coding: utf-8 -*-
"""
Created on Wed Jan  9 14:20:44 2019

@author: caixue1
"""
import gensim
from gensim.models import Word2Vec
import jieba
from jieba import posseg
import Core.MongoDB as db
import numpy as np
import pandas as pd
import codecs
import Core.MySQLDB as MySQLDB
import datetime
from bson.objectid import ObjectId

# Fetch the pre-segmented corpus word lists from Mongo.
def Text_to_WordSegments_Batch1(database):
    """Return the pre-computed ``segments`` list of every document in
    Recommender.Article.

    Fix: renamed the local ``filter`` (shadowed the builtin) and removed the
    dead debug counter / commented-out skip logic.
    """
    query = {}
    articleDocs = database.Find("Recommender", "Article", query)
    return [articleDoc["segments"] for articleDoc in articleDocs]

def WordSegment_Jieba(text):
    """Segment Chinese *text* with jieba POS tagging and return the words
    (length >= 2) whose POS tag is in the accepted noun/verb/etc. classes.

    Fix: the custom user dictionary is now loaded only once per process; the
    original reloaded it on every call, which is expensive when this runs
    once per document in the batch loops.
    """
    if not getattr(WordSegment_Jieba, "_userdict_loaded", False):
        jieba.load_userdict("D:/vocabulary_new.txt")  # filepath = "D:/vocabulary.txt"
        WordSegment_Jieba._userdict_loaded = True
    # Accepted part-of-speech tags (nouns, verbs, abbreviations, non-morphemes).
    pos = ['n', 'nz', 'nt', 'nr', 'ns', 'v', 'vn', 'j', 'x']
    segments = []
    for pair in posseg.cut(text):
        if len(pair.word) >= 2 and pair.flag in pos:
            segments.append(pair.word)
    return segments

def Text_to_WordSegments_Batch(database):
    """Build the training corpus: segment the Content of up to 5000 industry
    reports and 5000 research reports from the Text database, returning one
    word list per document."""
    industry_docs = database.Find("Text", "IndustryReport", {"limit": 5000})
    print('industry done')
    research_docs = database.Find("Text", "ResearchReport", {"limit": 5000})
    print('research done')

    corpus = [WordSegment_Jieba(doc["Content"]) for doc in industry_docs]
    corpus += [WordSegment_Jieba(doc["Content"]) for doc in research_docs]
    return corpus

# Load the vocabulary file: one term per line, term = first space-separated token.
def GetVocabulary(pathfilename):
    """Return the list of terms from *pathfilename* (e.g. "D:/vocabulary.txt").

    Fix: lines with no trailing annotation previously kept their newline
    (e.g. "beta\\n"), so later exact-match lookups against model vocabularies
    silently failed; line endings are now stripped before splitting. The
    redundant no-op ``f.close()`` inside the ``with`` block was removed.
    """
    with open(pathfilename, encoding='utf-8') as f:
        return [line.rstrip('\r\n').split(' ')[0] for line in f]


# Determine the optimal word2vec vector size.
def BestVectorSize(vocabulary, context, start_size, end_size, step, term_window):
    """Sweep Word2Vec vector sizes and measure the spread of pairwise term
    similarities at each size.

    For every size in ``range(start_size, end_size, step)`` a CBOW model
    (sg=0) is trained on *context*; the std and mean of similarities over all
    vocabulary term pairs present in the model are printed, and a DataFrame
    indexed by size with a 'std' column is returned (and plotted).

    NOTE(review): this uses the gensim < 4.0 API (``size=`` keyword,
    ``model.similarity``); gensim >= 4.0 renamed both — confirm the installed
    version before running.
    """
    effect = []
    for s in range(start_size, end_size, step):
        model = Word2Vec(context, min_count=1, size=s, window=term_window, workers=4, sg=0)  # term_window=5 means 5 words of context on each side

        # Similarity of every unordered pair of vocabulary terms the model knows.
        corr_list = []
        for i in range(len(vocabulary) - 1):
            term1 = vocabulary[i]
            for j in range(i + 1, len(vocabulary)):
                term2 = vocabulary[j]
                try:
                    corr = model.similarity(term1, term2)
                    corr_list.append(corr)
                except:  # NOTE(review): bare except also hides non-OOV errors
                    continue

        std = np.std(corr_list)
        mean = np.mean(corr_list)
        print(s, std, mean)
        effect.append([s, std])

    effect_df = pd.DataFrame(effect, columns=['size', 'std']).set_index('size')
    effect_df.plot()
    return effect_df



# Best size was found to be ~300; train and persist the term vectors.
def SaveVocabularyVector(database, vocabulary, context, best_size, term_window):
    """Train Word2Vec on *context*, upsert each in-vocabulary term's vector
    into Mongo (Label.Tag_Vec), dump the in-vocabulary term list to disk and
    return it.

    NOTE(review): uses the gensim < 4.0 API (``size=``, ``model[term]``).
    Fix: the original called ``f.close`` without parentheses, so the output
    file was never explicitly closed — replaced with a context manager; the
    bare except was narrowed to KeyError (out-of-vocabulary term).
    """
    model = Word2Vec(context, min_count=1, size=best_size, window=term_window, workers=4, sg=0)
    vocabulary_in = []
    for term in vocabulary:
        try:
            term_vec = model[term].astype(float)
        except KeyError:
            print(term, "notin")
            continue
        vocabulary_in.append(term)

        vectorDoc = {"Term": term, "Vector": list(term_vec)}
        database.upsert("Label", "Tag_Vec", {"Term": vectorDoc["Term"]}, vectorDoc)

    with codecs.open("D:/vocabulary_in.txt", 'w+', 'utf-8') as f:
        for word in vocabulary_in:
            if not isinstance(word, str):
                continue
            f.write(word + '\r\n')

    return vocabulary_in


# Compute pairwise term correlations (cosine similarity) and store them.
def LoadCorrelation(database, vocabulary_in):
    """For every ordered pair (term1, term2) from *vocabulary_in* where term1
    appears in the stock vocabulary file, compute the cosine similarity of
    their stored vectors and upsert it into Label.Correlation keyed
    "term1_term2".

    Fixes: terms without a stored vector previously raised an uncaught
    KeyError mid-run (now skipped); zero-norm vectors no longer divide by
    zero; stock-vocabulary membership is now an O(1) set lookup.
    """
    stock_terms = set(GetVocabulary(pathfilename="D:/vocabulary_stock.txt"))

    # Term -> vector cache loaded from Mongo.
    vector_by_word = {}
    for vectorDoc in database.find("Label", 'Tag_Vec'):
        vector_by_word[vectorDoc["Term"]] = np.array(vectorDoc["Vector"])

    for i in range(len(vocabulary_in) - 1):
        term1 = vocabulary_in[i]
        if term1 not in stock_terms:
            continue
        term1_vec = vector_by_word.get(term1)
        if term1_vec is None:
            continue  # no stored vector for this term
        norm1 = np.linalg.norm(term1_vec)  # hoisted out of the inner loop

        for j in range(i + 1, len(vocabulary_in)):
            term2 = vocabulary_in[j]
            if len(term2) == 0:
                continue
            term2_vec = vector_by_word.get(term2)
            if term2_vec is None:
                continue  # was an uncaught KeyError in the original

            denominator = norm1 * np.linalg.norm(term2_vec)
            if denominator == 0:
                continue  # avoid division by zero on zero vectors
            cos = (term1_vec * term2_vec).sum() / denominator

            correlationDoc = {
                "Term1": term1,
                "Term2": term2,
                "Correlation": float(cos),
            }
            database.upsert("Label", "Correlation", {"Key": term1 + "_" + term2}, correlationDoc)


# Compute each term's nearest neighbours from the Mongo correlation collection.
def GetTermSimilary_ByMonGo(database, term_list, topk):
    """Return {term: [up to *topk* most-correlated terms]} using the
    Label.Correlation collection (documents keyed "term1_term2").

    Fix: the original indexed ``range(topk)`` unconditionally and raised when
    a term had fewer than *topk* stored correlations; the loop is now capped
    at the number of available rows.
    """
    BestSimilarDoc = {}
    count = 0
    for term in term_list:
        count += 1
        print(count)
        # The term can appear on either side of the stored pair.
        term_corr1 = database.find("Label", "Correlation", query={"Term1": term})
        term_corr2 = database.find("Label", "Correlation", query={"Term2": term})
        if len(term_corr1) == 0 and len(term_corr2) == 0:
            continue

        corrByTerm = [[doc["Key"], doc["Correlation"]] for doc in term_corr1]
        corrByTerm += [[doc["Key"], doc["Correlation"]] for doc in term_corr2]

        corrByTerm_df = pd.DataFrame(corrByTerm, columns=['key', 'corr'])
        corrByTerm_df = corrByTerm_df.sort_values(by=['corr'], ascending=False)
        corrByTerm_df.index = range(len(corrByTerm_df))

        BestSimilar = []
        for i in range(min(topk, len(corrByTerm_df))):
            tag1 = corrByTerm_df.loc[i, 'key'].split('_')[0]
            tag2 = corrByTerm_df.loc[i, 'key'].split('_')[1]
            # Pick whichever side of the pair is not the query term itself.
            BestSimilar.append(tag2 if tag1 == term else tag1)

        BestSimilarDoc[term] = BestSimilar

    return BestSimilarDoc


# Compute each term's nearest neighbours from the MySQL correlation table.
def GetTermSimilary_ByMySql(database, term_list, topk):
    """Return {term: [up to *topk* most-correlated terms]} using the
    label.tag_correlation table.

    Fix: the original indexed ``range(topk)`` unconditionally and raised when
    a term had fewer than *topk* rows (or none at all); the loop is now
    capped at the number of available rows.
    """
    BestSimilarDoc = {}
    for term in term_list:
        # The term can appear in either column of the stored pair.
        rows = list(database.Find("label", "tag_correlation", filter={"tag1": term}))
        rows += list(database.Find("label", "tag_correlation", filter={"tag2": term}))

        corrByTerm = [[row["tag1"] + '_' + row["tag2"], row["correlation"]] for row in rows]

        corrByTerm_df = pd.DataFrame(corrByTerm, columns=['key', 'corr'])
        corrByTerm_df = corrByTerm_df.sort_values(by=['corr'], ascending=False)
        corrByTerm_df.index = range(len(corrByTerm_df))

        BestSimilar = []
        for i in range(min(topk, len(corrByTerm_df))):
            tag1 = corrByTerm_df.loc[i, 'key'].split('_')[0]
            tag2 = corrByTerm_df.loc[i, 'key'].split('_')[1]
            # Pick whichever side of the pair is not the query term itself.
            BestSimilar.append(tag2 if tag1 == term else tag1)
        BestSimilarDoc[term] = BestSimilar

    return BestSimilarDoc

# Build the nearest-neighbour cache with one ORDER BY ... LIMIT query per term.
def GetTermSimilary_FindWithSql(database, term_list, topk):
    """Return {term: [up to *topk* most-correlated terms]} via raw SQL against
    label.tag_correlation.

    SECURITY: terms are interpolated directly into the SQL string and
    FindWithSQL exposes no parameter binding here, so single quotes are now
    escaped to keep a quoted term from breaking (or injecting into) the
    statement. Prefer a parameterized API if FindWithSQL ever supports one.
    """
    cacheDoc = {}
    count = 0
    for term in term_list:
        count += 1
        print(count)
        safe_term = term.replace("'", "''")  # standard SQL single-quote escape
        sqlCommand = ("SELECT * FROM label.tag_correlation where tag1='" + safe_term
                      + "' or tag2='" + safe_term
                      + "' order by correlation desc limit " + str(topk))
        BestSimilar = database.FindWithSQL('label', 'tag_correlation', sqlCommand)

        similar_tag = []
        for BestSimilarDoc in BestSimilar:
            # Keep whichever side of the pair is not the query term itself.
            if BestSimilarDoc['tag1'] == term:
                similar_tag.append(BestSimilarDoc['tag2'])
            else:
                similar_tag.append(BestSimilarDoc['tag1'])

        cacheDoc[term] = similar_tag
    return cacheDoc


# Associate stock/industry/fund tags with articles based on their existing tags.
def Article_Tag_Associate(mongo, database, field, cache_method):
    """For every article, find tags of the requested *field* whose
    nearest-neighbour cache overlaps the article's own tags, and upsert them
    into label.article_tag_asset.

    field: 'stock', 'industry' or 'fund'.
    cache_method: 'ByMonGo', 'ByMySql' or 'FindWithSql'.

    Fixes: unknown *field*/*cache_method* values now raise ValueError (the
    original fell through to a NameError); the three copy-pasted field
    branches are collapsed into a lookup table; the bare except around the
    cache lookup is replaced with ``dict.get``.
    """
    # field -> (method label stored with the association, tag class_id).
    field_map = {
        'stock': ('Associate_stock', 174),
        'industry': ('Associate_industry', 176),
        'fund': ('Associate_fund', 183),
    }
    if field not in field_map:
        raise ValueError("unknown field: " + field)
    method, class_id = field_map[field]
    common_list = [doc['name']
                   for doc in database.Find('label', 'tag', filter={"class_id": class_id})]
    print('list done')

    # Nearest-neighbour cache: common tag -> its top-10 similar tags.
    cache_funcs = {
        'ByMonGo': GetTermSimilary_ByMonGo,
        'ByMySql': GetTermSimilary_ByMySql,
        'FindWithSql': GetTermSimilary_FindWithSql,
    }
    if cache_method not in cache_funcs:
        raise ValueError("unknown cache_method: " + cache_method)
    # NOTE(review): the original passed `mongo` to all three helpers,
    # including the MySQL-backed ones — confirm that is intended.
    BestSimilarDoc_common = cache_funcs[cache_method](mongo, term_list=common_list, topk=10)
    print('cache done')

    # All article ids.
    articleId_list = [doc['ID'] for doc in database.Find('text', 'news')]
    print('articleid done')

    count = 0
    for artid in articleId_list:
        count += 1
        print(count)
        articleTag_list = [doc['tag']
                           for doc in database.Find('label', 'article_tag',
                                                    filter={"article_id": artid})]
        print(articleTag_list)

        article_tags = set(articleTag_list)
        associate_tag = []
        # A common tag is associated when its similar-tag cache intersects
        # the article's own tags.
        for common in common_list:
            similar = BestSimilarDoc_common.get(common)
            if similar and set(similar) & article_tags:
                associate_tag.append([common, method])

        print(associate_tag)

        for associtag in associate_tag:
            tagid = database.Find('label', 'tag', filter={"name": associtag[0]})[0]['id']
            newDoc = {
                "article_id": artid,
                "tag": associtag[0],
                "tag_id": tagid,
                "method": associtag[1],
            }
            database.Upsert("label", "article_tag_asset",
                            target={"article_id": artid, "tag_id": tagid}, document=newDoc)


def BestVectorSize_Batch(database, pathfilename, start_size, end_size, step, term_window):
    """Build the corpus and vocabulary, sweep Word2Vec vector sizes, write
    the size/std table to D:/effect_size.csv and return it."""
    corpus = Text_to_WordSegments_Batch(database)
    print('context done', corpus)
    vocab = GetVocabulary(pathfilename)
    print('vocabulary done')
    result_df = BestVectorSize(vocab, corpus, start_size, end_size, step, term_window)
    print(result_df)
    result_df.to_csv('D:/effect_size.csv', encoding='GBK')
    return result_df


def Article_Tag_Associate_Batch(database, databasesql, pathfilename, best_size, term_window, field, cache_method):
    """Full pipeline: segment the corpus, load the vocabulary, then train and
    persist the term vectors. The correlation and association steps are
    currently disabled (commented out), so `databasesql`, `field` and
    `cache_method` are unused for now."""
    print(datetime.datetime.now())
    corpus = Text_to_WordSegments_Batch(database)
    print('context done')
    vocab = GetVocabulary(pathfilename)
    print('vocabulary done')
    # Train word vectors and upsert them into Mongo.
    in_vocab = SaveVocabularyVector(database, vocab, corpus, best_size, term_window)
    print('load vector done')
    #LoadCorrelation(database, in_vocab)  # store pairwise correlations (disabled)
    print('load corr done')
    #Article_Tag_Associate(database, databasesql, field, cache_method)
    print(datetime.datetime.now())


def Associate_Stock_Product(database):
    """Map each stock symbol to the product names from its most recent
    fundamental report whose SegmentSalesByProduct field is non-zero.

    The field is a string like "productA:123;productB:456"; the catch-all
    "其他业务" ("other business") bucket is skipped.

    Fix: the original walked backwards with ``products[-i]`` in a
    ``while product == 0`` loop and raised IndexError when every report had a
    zero field; such stocks are now skipped instead.
    """
    stock_productDoc = {}
    symbol_list = [doc['Symbol'] for doc in database.find("Instruments", "Stock")]

    for stock in symbol_list:
        products = database.find("Stock", "Fundamental", query={"Symbol": stock})
        if len(products) == 0:
            continue

        # Newest report (last element) first; fall back to older ones.
        product = 0
        for report in reversed(products):
            product = report['Values']['SegmentSalesByProduct']
            if product != 0:
                break
        if product == 0:
            continue  # no report carries product data for this stock

        sales_product = []
        for oneproduct in product.split(';'):
            if oneproduct[0:4] == '其他业务':  # skip the "other business" bucket
                continue
            sales_product.append(oneproduct.split(':')[0])
        stock_productDoc[stock] = sales_product

    print(stock_productDoc)
    return stock_productDoc


def Associate_Fund_Position(database):
    """Map each mutual fund to the tag names of the (up to 10) largest equity
    positions in its latest report.

    Fixes: the ``fund_stock[fund] = stock_list`` assignment sat OUTSIDE the
    fund loop, so only the last fund processed was ever recorded; position
    symbols missing from the Asset_Tag_Map no longer raise KeyError (they are
    skipped); the bare except around the report lookup is narrowed.
    """
    fund_stock = {}

    # Stock symbol -> tag name.
    stock_map = {}
    for asset_mapDoc in database.find("Label", "Asset_Tag_Map"):
        stock_map[asset_mapDoc['Symbol']] = asset_mapDoc['Tag_Name']

    symbol_list = [doc['Symbol'] for doc in database.find("Instruments", "MutualFund")]

    for fund in symbol_list:
        try:
            positions = database.find("MutualFund", "Reports", query={"Symbol": fund})[-1]['Positions']
            print(fund)
        except (IndexError, KeyError):
            continue  # no report available for this fund

        # Top 10 positions by equity value.
        positions_sort = sorted(positions, key=lambda x: x['Equity'], reverse=True)
        positions_select = positions_sort[:10]

        stock_list = []
        for position in positions_select:
            tag = stock_map.get(position['Symbol'])
            if tag is not None:
                stock_list.append(tag)

        fund_stock[fund] = stock_list

    return fund_stock


def EventDriven(database, datetime2, rolling_days):
    """Rank tags by accumulated weight over articles in the window
    (datetime2 - rolling_days, datetime2].

    Returns (top-10 stocks, top-10 investment themes, 10 least-mentioned
    investment themes) among tags that actually occurred in the window.

    Fixes: tag-weight accumulation now uses ``dict.get`` instead of
    try/except KeyError control flow, and the three copy-pasted top-k
    selection loops are factored into a helper.
    """
    # Cache of stock-name tags.
    stock_list = [doc['name']
                  for doc in database.find("Label", "Tag", query={"class": "股票名称"})]
    # Cache of investment-theme tags.
    invest_theme_list = [doc['name']
                         for doc in database.find("Label", "Tag", query={"class": "投资主题"})]

    # Article ids inside the time window.
    datetime1 = datetime2 - datetime.timedelta(rolling_days)
    filter = {"DateTime": {">": datetime1, "<=": datetime2}}
    article_id = [doc['_id'] for doc in database.Find("Text", "News", filter)]

    # Accumulate each tag's total weight over all window articles.
    tag_num = {}
    tag_list = []
    count = 0
    for Id in article_id:
        count += 1
        print(count)
        for article_tagDoc in database.find("Label", "Article_Tag", query={"Article_Id": Id}):
            tag_name = article_tagDoc['Tag_Name']
            tag_list.append(tag_name)
            tag_num[tag_name] = tag_num.get(tag_name, 0) + article_tagDoc['Weight']

    # Restrict to tags that occurred in the window.
    stock_set = set(stock_list) & set(tag_list)
    invest_theme_set = set(invest_theme_list) & set(tag_list)

    tag_num_sort = sorted(tag_num.items(), key=lambda item: item[1], reverse=True)
    tag_num_least = sorted(tag_num.items(), key=lambda item: item[1], reverse=False)

    def _select(ranked, allowed, limit=10):
        # First `limit` ranked tags that belong to `allowed`.
        picked = []
        for tag, _weight in ranked:
            if tag in allowed:
                picked.append(tag)
            if len(picked) >= limit:
                break
        return picked

    frequent_stock = _select(tag_num_sort, stock_set)
    frequent_invest_theme = _select(tag_num_sort, invest_theme_set)
    least_invest_theme = _select(tag_num_least, invest_theme_set)

    print(frequent_stock, frequent_invest_theme, least_invest_theme)
    return frequent_stock, frequent_invest_theme, least_invest_theme


def EventDriven_Increase(database, datetime2, rolling_days):
    """Rank tags by the *increase* in accumulated weight between two adjacent
    windows of *rolling_days* ending at *datetime2*.

    Returns (top-10 stocks, top-10 investment themes) among tags occurring in
    the current window; a tag seen in both windows contributes
    now_weight - ago_weight, a tag new to the current window its raw weight.

    Fixes: the duplicated per-window id-collection and tally loops are
    factored into helpers, and weight accumulation uses ``dict.get`` instead
    of try/except KeyError control flow.
    """
    # Tag caches.
    stock_list = [doc['name']
                  for doc in database.find("Label", "Tag", query={"class": "股票名称"})]
    invest_theme_list = [doc['name']
                         for doc in database.find("Label", "Tag", query={"class": "投资主题"})]

    # Two adjacent windows: (datetime0, datetime1] and (datetime1, datetime2].
    datetime1 = datetime2 - datetime.timedelta(rolling_days)
    datetime0 = datetime1 - datetime.timedelta(rolling_days)

    def _article_ids(dt_from, dt_to):
        # Ids of articles in the half-open window (dt_from, dt_to].
        filter = {"DateTime": {">": dt_from, "<=": dt_to}}
        return [doc['_id'] for doc in database.Find("Text", "News", filter)]

    def _tally(ids):
        # Sum tag weights over the given article ids; also return every
        # occurrence so callers can build occurrence sets.
        nums = {}
        names = []
        for one_id in ids:
            for tagDoc in database.find("Label", "Article_Tag", query={"Article_Id": one_id}):
                name = tagDoc['Tag_Name']
                names.append(name)
                nums[name] = nums.get(name, 0) + tagDoc['Weight']
        return nums, names

    tag_num_now, tag_list_now = _tally(_article_ids(datetime1, datetime2))
    tag_num_ago, tag_list_ago = _tally(_article_ids(datetime0, datetime1))

    # Only tags seen in both windows need adjusting; tags new to the current
    # window simply keep their raw count.
    for one in set(tag_list_ago) & set(tag_list_now):
        tag_num_now[one] = tag_num_now[one] - tag_num_ago[one]

    tag_num_sort = sorted(tag_num_now.items(), key=lambda item: item[1], reverse=True)

    # Restrict to tags occurring in the current window.
    stock_set = set(stock_list) & set(tag_list_now)
    invest_theme_set = set(invest_theme_list) & set(tag_list_now)

    def _select(ranked, allowed, limit=10):
        # First `limit` ranked tags that belong to `allowed`.
        picked = []
        for tag, _weight in ranked:
            if tag in allowed:
                picked.append(tag)
            if len(picked) >= limit:
                break
        return picked

    frequent_stock = _select(tag_num_sort, stock_set)
    frequent_invest_theme = _select(tag_num_sort, invest_theme_set)

    print(frequent_stock, frequent_invest_theme)
    return frequent_stock, frequent_invest_theme





# Script entry: open the database connections and run the association pipeline.
# SECURITY NOTE(review): the database host and MySQL credentials are
# hard-coded in source — move them to configuration/environment variables
# before sharing or deploying this file.
database = db.MongoDB("10.13.144.252", "27017")
databasesql = MySQLDB.MySQLDB("10.13.144.252", "3306", username="root", password="kirk2019")
#datetime2 = datetime.datetime(2019, 2, 27)
#EventDriven_Increase(database, datetime2, rolling_days=7)
#EventDriven(database, datetime2, rolling_days=1)



# 1. Find the optimal word2vec vector size (disabled)
#effect_df = BestVectorSize_Batch(database, pathfilename="D:/vocabulary_new.txt",
                                 #start_size=20, end_size=100, step=10, term_window=5)

# 2. Associate stock/industry/fund tags with articles and store the results
Article_Tag_Associate_Batch(database, databasesql, pathfilename="D:/vocabulary_new.txt",
                            best_size=200, term_window=5, field='stock', cache_method='ByMonGo')


# 3. If the correlation collection already exists, run Article_Tag_Associate directly (disabled)
#Article_Tag_Associate(database, databasesql, field='stock', cache_method='ByMonGo')




