'''
Created on 2019-08-23
@author: sida
'''
from compute_DistributionSimilarity import *

from util import Node
import util
# Pipeline 1: company-info bag-of-words --> pairwise cross-entropy matrix
# between companies --> Top-N smallest (most similar).
# NOTE(review): `sunNodes` is presumably the dependency ("sub node") list of
# util.Node, and `persistence_path` a cache location — confirm against util.
node_CountMatrix = Node(name='csv parser and CountMatrix',compute=get_csvData,persistence_path='cache/CountMatrix')
node_log_Matrix = Node(name='logMatrix',compute=log_Matrix,sunNodes=[node_CountMatrix],persistence_path='cache/log_Matrix')
# Inference node: always recomputed (forceUpdate=True) and never cached (save=False).
node_inference = Node(name='ds_inference', compute=inference, sunNodes=[node_log_Matrix, node_CountMatrix], forceUpdate=True, save=False)
# `order='-'` presumably means ascending ranking (smaller cross-entropy = more
# similar) — TODO confirm against util.singel_model_similarity.
node_ds_similarity = Node(name='node_ds_similarity',compute=util.singel_model_similarity,sunNodes=[node_log_Matrix],superParams=([],{'order':'-'}),forceUpdate=True)

# Pipeline 2: company-info sentence vectors (BERT) --> cosine similarity
# between vectors --> Top-N largest (most similar).
node_csvFile2Obj = Node(name='ds_csvFile2Obj',compute=ds_csvFile2Obj,persistence_path='cache/ds_csvFile2Obj')
node_info2vecByBert = Node(name='info2vecByBert',sunNodes=[node_csvFile2Obj],compute=info2vecByBert,persistence_path='cache/info2vecByBert')
node_computeF_MatrixCos = Node(name='computeF_MatrixCos',sunNodes=[node_info2vecByBert],compute=computeF_MatrixCos,persistence_path='cache/computeF_MatrixCos')
# First positional argument is presumably `name` — consistent with the keyword
# form used elsewhere in this file; confirm against util.Node's signature.
node_inference_work = Node('ds_inference_work',sunNodes=[node_computeF_MatrixCos,node_csvFile2Obj],compute=ds_inference_work,persistence_path='cache/ds_inference_work')

if __name__ == '__main__':
    # Nodes whose results must be recomputed on this run (forceUpdate set below).
    compulsory_Update_Nodes = [node_CountMatrix, node_log_Matrix, node_inference]
    # Nodes evaluated as-is (cached result may be reused).
    NoForce_Update_Nodes = [node_CountMatrix]
    # NOTE(review): node_CountMatrix appears in both lists, so getData() is
    # requested (and its type printed) twice, and its forceUpdate flag is set
    # anyway — confirm the duplication is intentional.
    wanted_Nodes = NoForce_Update_Nodes + compulsory_Update_Nodes
    for node in compulsory_Update_Nodes:
        node.forceUpdate = True
    # Evaluate every requested node. A plain for-loop replaces the original
    # list comprehension that was used only for its print side effect.
    for node in wanted_Nodes:
        print(type(node.getData()))
    
#     import graph_wordsMatrixOfBertSimilarity as wB
#     csvdata,company2segemnt = wB.node_csvFile2Obj.getData(),node_CountMatrix.getData()[2]
#     for _ in [2,55,1417,444]:
#         print(csvdata.iloc[_],company2segemnt[_],sep='\n')
    
    
    
