# -*- coding: utf-8 -*-

# @File    : Similarity.py
# @Date    : 2019-09-17
# @Author  : gongsunmolan

# Tokenize text with jieba and compare document similarity via TF-IDF.

import jieba
from gensim import corpora, models, similarities
import os

# Show the current working directory (informational only).
curr_dir = os.getcwd()
print(curr_dir)

# Load a custom user dictionary so domain-specific terms are segmented as
# whole words. jieba.load_userdict accepts a file path or a file-like object.
# The original hard-coded absolute path crashed the script on any machine
# where it does not exist; guard it so segmentation falls back to jieba's
# built-in dictionary instead.
_USER_DICT_PATH = 'E:/03/wechat_robot/Resources/dic/dict.txt'
if os.path.exists(_USER_DICT_PATH):
    jieba.load_userdict(_USER_DICT_PATH)
else:
    print('User dictionary not found, using default segmentation: %s'
          % _USER_DICT_PATH)

# Reference documents (Chinese support-ticket / chat snippets) and one
# query document to compare against them. Runtime strings kept verbatim.
doc0 = "服务器配的双网，其中10.段的那根网线插到服务器灯不亮，网口调换一下还是10.段的不亮，但这根网线插到自己电脑上灯是亮的，谁知道改怎么处理？服务器ve2208z"
doc1 = "阵列不可用"
doc2 = "云存储2004界面阵列不可用"
doc3 = "存储阵列不可用，删除后重新做阵列后，无法添加到存储池，日志提示添加存储池失败，怎么排查下"
doc4 = "李川疆上海好玩的在哪里"
doc5 = "上海是好地方"
doc6 = "上海路和上海人"
doc7 = "喜欢小吃"
doc_test = "我喜欢上海的小吃"

# Collect the reference documents in order; index i corresponds to doc{i}.
all_doc = [doc0, doc1, doc2, doc3, doc4, doc5, doc6, doc7]

# Tokenize every document with jieba (jieba.cut yields a word generator);
# list(...) replaces the redundant identity comprehension of the original.
all_doc_list = [list(jieba.cut(doc)) for doc in all_doc]
print(all_doc_list)

# Tokenize the query document the same way.
doc_test_list = list(jieba.cut(doc_test))
print(doc_test_list)

# Build a token -> integer-id mapping over the tokenized reference corpus.
dictionary = corpora.Dictionary(all_doc_list)

# Convert each tokenized document into a sparse bag-of-words vector.
corpus = [dictionary.doc2bow(doc) for doc in all_doc_list]

# Vectorize the query document against the same dictionary; tokens unseen
# during dictionary construction are dropped by doc2bow.
doc_test_vec = dictionary.doc2bow(doc_test_list)
print(doc_test_vec)

# Fit a TF-IDF model on the corpus and build a sparse similarity index
# over the TF-IDF-weighted document vectors.
tfidf = models.TfidfModel(corpus)
index = similarities.SparseMatrixSimilarity(
    tfidf[corpus], num_features=len(dictionary.keys()))

# Similarity score between the TF-IDF query vector and every document.
sim = index[tfidf[doc_test_vec]]
print(sim)

# Rank documents most-similar-first. The original computed this ranking
# but discarded the result; print it so the output is actually visible.
# (Dead no-op expression statements — leftover notebook cells — removed.)
print(sorted(enumerate(sim), key=lambda item: -item[1]))