'''
Created on 2019-08-23
@author: sida
'''
import numpy as np
import compute_data_pre as dtpr
from util import Node
import util
import re

def index2Info(self):
    """Build one segmented text string per company.

    Reads the CSV from the first child node, concatenates each company's
    description with its non-trivial graph labels, and word-segments the
    result.

    Returns:
        list[str]: one segmented string per CSV row.
    """
    import ast  # local import, matching the file's style of function-scope imports
    csvData = self.sunNodes[0].getData()
    texts = []
    # Labels carrying no company-specific information (exchange names, placeholder).
    trivial = ['上交所', '深交所', '交易所', "DATAEXA"]
    for _, row in csvData.iterrows():
        # literal_eval instead of eval: the cell holds a plain list literal and
        # must not be executed as arbitrary code.
        labels = ast.literal_eval(row['labels(n)'])
        labels = [label for label in labels if not util.strContainSub(trivial, label)]
        info = row['n.公司简介'] + ''.join(labels)
        texts.append(info)
    texts = [dtpr.seg_sentence(text) for text in texts]
    texts = [''.join(text) for text in texts]
    # *texts[:2] instead of texts[0], texts[1]: avoids IndexError on tiny inputs.
    print('index2Info over', *texts[:2], sep='\n')
    return texts
def index2Sentences(self):
    """Split each company's description into sentences, appending its
    non-trivial labels as extra short "sentences".

    Returns:
        list[list[str]]: per company, the sentence/label strings longer
        than 2 characters.
    """
    import ast  # local import, matching the file's style of function-scope imports
    csvData = self.sunNodes[0].getData()
    texts = []
    # Labels carrying no company-specific information (exchange names, placeholder).
    trivial = ['上交所', '深交所', '交易所', "DATAEXA"]
    for _, row in csvData.iterrows():
        # literal_eval instead of eval: safe for the list literal stored in the CSV.
        labels = ast.literal_eval(row['labels(n)'])
        labels = [label for label in labels if not util.strContainSub(trivial, label)]
        # Raw string fixes the invalid '\!' escape of the original pattern while
        # matching the same delimiters; the capture group keeps the punctuation
        # tokens, which the length filter below then discards.
        sentences = re.split(r'(。|！|!|\.|？|\?)', row['n.公司简介']) + labels
        # Drop punctuation fragments and strings too short to be informative.
        sentences = [s for s in sentences if len(s) > 2]
        texts.append(sentences)
    texts_length = [util.meanLength(ll) for ll in texts]
    print('平均句子长度', np.mean(texts_length))
    return texts
def sentences2MatrixByBert(self):
    """Encode each company's sentence list (or token list) into a matrix of
    BERT sentence vectors via a running bert-serving server.

    Returns:
        list: one encoded matrix per company.
    """
    sentence_lists = self.sunNodes[0].getData()[2]
    from bert_serving.client import BertClient
    client = BertClient()
    matrices = []
    for idx, sentences in enumerate(sentence_lists):
        print(idx, sentences)
        matrices.append(client.encode(sentences))
    return matrices
def info2MatrixByBert(self):
    """Feed each full company-info string to BERT in one piece, collecting the
    per-character vector matrices. Callers must discard the trailing padding
    (zero) vectors themselves.

    Returns:
        np.ndarray: stacked encodings for all texts.
    """
    texts = index2Info(self)
    from bert_serving.client import BertClient
    client = BertClient()
    # Encode in batches of 100 to keep memory usage bounded.
    batch = 100
    pos = batch
    encoded = client.encode(texts[:pos])
    while pos < len(texts):
        pos += batch
        # Slicing past the end is safe: the final batch is simply shorter.
        encoded = np.concatenate((encoded, client.encode(texts[pos - batch:pos])))
        print('batch accumulate:', pos)
    return encoded

def computeF(self,similarityF=util.groupVectorSimilarity):
    """Build the symmetric pairwise-similarity matrix over companies.

    Each company's sentence-vector matrix is first collapsed to its mean row
    vector; similarityF is then applied to every (unordered) pair, including
    each vector with itself on the diagonal.

    Args:
        similarityF: callable taking two vectors and returning a scalar.

    Returns:
        np.ndarray: n x n symmetric similarity matrix.
    """
    company_vectors = self.sunNodes[0].getData()
    # One mean vector per company, stacked into a 2-D array.
    company_vectors = np.vstack([m.mean(0) for m in company_vectors])
    print('info2Matrix[0] shape:', company_vectors[0].shape)
    n = len(company_vectors) if type(company_vectors) == list else company_vectors.shape[0]
    sim = np.zeros((n, n))
    for row in range(sim.shape[0]):
        sim[row][row] = similarityF(company_vectors[row], company_vectors[row])
        # Only the upper triangle is computed; the lower is mirrored.
        for col in range(row + 1, sim.shape[1]):
            print(row, col)
            score = similarityF(company_vectors[row], company_vectors[col])
            sim[row][col] = score
            sim[col][row] = score
    return sim
def computeOSD(self):
    """Build the symmetric pairwise matrix of util.MatrixOSMeanDist distances
    between the companies' vector matrices (diagonal included).

    Returns:
        np.ndarray: n x n symmetric distance matrix.
    """
    matrices = self.sunNodes[0].getData()
    print('info2Matrix[0] shape:', matrices[0].shape)
    n = len(matrices)
    dist = np.zeros((n, n))
    for a in range(dist.shape[0]):
        dist[a][a] = util.MatrixOSMeanDist(matrices[a], matrices[a])
        # Only the upper triangle is computed; the lower is mirrored.
        for b in range(a + 1, dist.shape[1]):
            print(a, b)
            value = util.MatrixOSMeanDist(matrices[a], matrices[b])
            dist[a][b] = value
            dist[b][a] = value
    print('compute mean:', dist.mean(), sep=' ')
    return dist
def inference_work(self, topN=6, queryIndex=555):
    """Report the topN companies most similar to the one at queryIndex.

    Reads the precomputed similarity scores from the first child node and the
    index-to-name table from the second, prints the matches, and returns them.

    Args:
        topN: number of neighbours to report (default 6, as before).
        queryIndex: similarity-matrix row to query (default 555, as before).

    Returns:
        list[tuple]: (name, similarity) pairs, ascending by similarity.
    """
    computeScore = self.sunNodes[0].getData()
    index2Name = self.sunNodes[1].getData()
    work = computeScore[queryIndex]
    # argsort is ascending, so the last topN indices are the most similar.
    keysSort = np.argsort(work)[-topN:]
    print(keysSort)
    results = [(index2Name.iloc[key], work[key]) for key in keysSort]
    for name, score in results:
        print(name, 'Similarity', score)
    return results

def computetransferMatrixMeanSimilarity(self):
    """Build the symmetric pairwise matrix of
    util.transferMatrixMeanSimilarity_diff_p scores between the companies'
    vector matrices (diagonal included).

    Returns:
        np.ndarray: n x n symmetric similarity matrix.
    """
    matrices = self.sunNodes[0].getData()
    n = len(matrices)
    sim = np.zeros((n, n))
    for a in range(sim.shape[0]):
        sim[a][a] = util.transferMatrixMeanSimilarity_diff_p(matrices[a], matrices[a])
        # Only the upper triangle is computed; the lower is mirrored.
        for b in range(a + 1, sim.shape[1]):
            print(a, b)
            value = util.transferMatrixMeanSimilarity_diff_p(matrices[a], matrices[b])
            sim[a][b] = value
            sim[b][a] = value
    print('compute mean:', sim.mean(), sep=' ')
    return sim

# Pipeline: segmented company-info sentences -> BERT sentence vectors (labels treated
# as short phrase-sentences), so each company becomes a matrix -> cosine similarity
# (larger = more similar) -> inference_work picks the topN largest.
import node_DistributionSimilarity as node_ds
# Encode every company's sentence list into a matrix of BERT vectors.
node_sentences2MatrixByBert_tokens = Node(name='tokens_matrix',compute=sentences2MatrixByBert,sunNodes=[node_ds.node_CountMatrix])
# Pairwise company similarity over the mean sentence vectors, using plain cosine.
node_computeF_sentences2MatrixByBert_tokens = Node(name='sentences2MatrixByBert_tokens',sunNodes=[node_sentences2MatrixByBert_tokens],\
                                                   compute=computeF,superParams=([],{'similarityF':util.vectorCos}))
# Print the topN most similar companies for the fixed query row; results are not cached.
node_inference_cos = Node(name='inference_cos',sunNodes=[node_computeF_sentences2MatrixByBert_tokens,node_ds.node_csvFile2Obj],compute=inference_work,save=False)
# NOTE(review): presumably evaluates this model's similarity matrix on its own —
# confirm against util.singel_model_similarity. forceUpdate recomputes on every run.
node_wmb_similarity = Node(name='node_wmb_similarity',compute=util.singel_model_similarity,sunNodes=[node_computeF_sentences2MatrixByBert_tokens],forceUpdate=True)

if __name__ == '__main__':
    # Nodes whose cached data must be recomputed on this run.
    compulsory_Update_Nodes = [node_inference_cos, node_wmb_similarity]
    NoForce_Update_Nodes = []
    wanted_Nodes = NoForce_Update_Nodes + compulsory_Update_Nodes
    for node in compulsory_Update_Nodes:
        node.forceUpdate = True
    # Trigger computation of every wanted node and show the result types.
    for node in wanted_Nodes:
        print(type(node.getData()))
else:
    # When imported, export every module-level function through __all__.
    import inspect
    import sys
    __all__ = [member_name for member_name, _fn in inspect.getmembers(sys.modules[__name__], inspect.isfunction)]

    
    
    
    
