import jieba
from gensim import corpora,models,similarities
# Load the reference corpus and the query document, then split each into
# sentences on the Chinese full stop "。".
# Fix: the original opened both files without `with` (handles were never
# closed) and without an explicit encoding (decoding depended on the OS
# locale). UTF-8 is assumed for these Chinese text files — TODO confirm
# against the actual files' encoding.
with open('我国软件工程技术的现状.txt', 'r', encoding='utf-8') as f:
    texts = f.read()
with open('对软件开发工作进行创新的方法.txt', 'r', encoding='utf-8') as f:
    keyword = f.read()
texts = texts.split("。")
keyword = keyword.split("。")

# Tokenise every corpus sentence with jieba: one list of tokens per sentence.
# Idiom fix: replaces a manual append loop containing a redundant identity
# comprehension ([text for text in jieba.cut(doc)]) with a single list
# comprehension; list(jieba.cut(doc)) materialises the generator directly.
all_doc_list = [list(jieba.cut(doc)) for doc in texts]

# Build the vector-space resources from the tokenised corpus:
# a token dictionary, bag-of-words vectors for every sentence, an LSI model
# (default num_topics), and a sparse cosine-similarity index over the
# LSI-projected corpus.
dictionary = corpora.Dictionary(all_doc_list)
# len(dictionary) is the number of unique tokens (same as len(dictionary.keys())
# but idiomatic and O(1)).
num_features = len(dictionary)
corpus = [dictionary.doc2bow(text) for text in all_doc_list]
# id2word lets gensim render topics with readable tokens; the LSI projection
# itself is unchanged by this argument.
lsi = models.LsiModel(corpus, id2word=dictionary)
index = similarities.SparseMatrixSimilarity(lsi[corpus], num_features=num_features)
# Count how many query sentences have at least one corpus sentence whose LSI
# cosine similarity exceeds the threshold, then report the matched fraction.
SIM_THRESHOLD = 0.8

# Bug fix: split("。") leaves empty fragments (e.g. after a trailing "。")
# that can never match anything yet inflated the denominator of the reported
# ratio. Score only non-blank sentences.
query_sentences = [s for s in keyword if s.strip()]

count = 0
for sentence in query_sentences:
    doc_test_list = list(jieba.cut(sentence))
    doc_test_vec = dictionary.doc2bow(doc_test_list)
    sim = index[lsi[doc_test_vec]]
    # Only the best match matters — max(sim) replaces sorting the whole
    # similarity array just to read its first element.
    if len(sim) and max(sim) > SIM_THRESHOLD:
        count += 1

# Bug fix: guard against an empty query document (ZeroDivisionError in the
# original when keyword had no sentences).
ratio = count / len(query_sentences) if query_sentences else 0.0
print('相似度：{:^.2%}'.format(ratio))



