'''
Created on 2019-08-23
@author: sida

Driver script: runs the node pipeline from node_DistributionSimilarity
and prints the type of each node's computed data.
'''
import node_DistributionSimilarity as node_ds
    
# Pipeline A: company-info bag-of-words --> pairwise cross-entropy matrix
# between companies --> top-N smallest
node_CountMatrix = node_ds.node_CountMatrix
node_log_Matrix = node_ds.node_log_Matrix
node_inference = node_ds.node_inference

# Pipeline B: company-info sentence vectors --> cosine similarity between
# vectors --> top-N largest
node_csvFile2Obj = node_ds.node_csvFile2Obj
node_info2vecByBert = node_ds.node_info2vecByBert
node_computeF_MatrixCos = node_ds.node_computeF_MatrixCos
node_inference_work = node_ds.node_inference_work

if __name__ == '__main__':
    # Nodes whose cached results must be recomputed on this run.
    compulsory_Update_Nodes = [node_CountMatrix, node_log_Matrix, node_inference]
    # Nodes allowed to reuse their cached results.
    # NOTE(review): node_CountMatrix also appears in compulsory_Update_Nodes,
    # so its forceUpdate flag is set anyway and it is evaluated twice below —
    # confirm this duplication is intended.
    NoForce_Update_Nodes = [node_CountMatrix]
    wanted_Nodes = NoForce_Update_Nodes + compulsory_Update_Nodes
    for node in compulsory_Update_Nodes:
        node.forceUpdate = True
    # Plain loop (not a list comprehension) because printing is a side
    # effect, not collection building.
    for node in wanted_Nodes:
        print(type(node.getData()))