import datetime

from gensim import similarities
import similarity.caculatetfidf as caculatetfidf
import similarity.jiebaSegmentationWord as jiebaSegmentationWord
import numpy as np
import word2vec.word2vec as word2vec


#
# ## 计算tfidf 值，用于计算相似度
# vec_tfidf, word_list,tfidf, dictionary, num_features = caculatetfidf.caculatetfidf()
#
# ## gensim 提供的相似度计算
# sparse_matrix = similarities.SparseMatrixSimilarity(vec_tfidf, num_features)
#
# ## 将传入的语句计算词袋模型
# bow_vector = dictionary.doc2bow(jiebaSegmentationWord.cut("在泰山路小区进行巡视时，发现居民将衣物晾晒在电线上、存在安全隐患"))
# bow_tfidf = tfidf[bow_vector]
#
# ## 计算相似度
# similarities_list = sparse_matrix.get_similarities(bow_tfidf)
#
# ## 排序
# sorted_similarities_list = sorted(enumerate(similarities_list), key=lambda x: x[1],reverse=True)
#
# result_similarities = [[word_list[i[0]],i[1]] for i in sorted_similarities_list[0:10]]
#
# print(result_similarities)

def getSimilarity_top10(text=""):
    """Return the 10 corpus entries most similar to *text* by TF-IDF cosine similarity.

    Parameters
    ----------
    text : str
        Query sentence; segmented with jieba before scoring.

    Returns
    -------
    tuple[list, list[dict]]
        ``(top10, top10_dicts)`` where ``top10`` is a list of
        ``[corpus_text, raw_score]`` pairs and ``top10_dicts`` is the same
        data as ``{'similarity': float, 'text': str}`` dictionaries.
    """
    # Build the TF-IDF model over the whole corpus (basis for similarity).
    vec_tfidf, word_list, tfidf, dictionary, num_features = caculatetfidf.caculatetfidf()

    # gensim sparse-matrix similarity index over the TF-IDF corpus.
    sparse_matrix = similarities.SparseMatrixSimilarity(vec_tfidf, num_features)

    # Segment the query, convert to bag-of-words, then to a TF-IDF vector.
    bow_vector = dictionary.doc2bow(jiebaSegmentationWord.cut(text))
    bow_tfidf = tfidf[bow_vector]

    # Cosine similarity of the query against every corpus document.
    similarities_list = sparse_matrix.get_similarities(bow_tfidf)

    # Sort (index, score) pairs by score, highest first, and keep the top 10.
    sorted_similarities_list = sorted(enumerate(similarities_list), key=lambda x: x[1], reverse=True)
    result_similarities_top10 = [[word_list[i[0]], i[1]] for i in sorted_similarities_list[0:10]]

    # FIX: wrap the score in float() — gensim returns numpy float32, which is
    # not JSON-serializable; this also matches getSimilarity_top10_from_db.
    result_str = [{'similarity': float(doc_score[1]), 'text': doc_score[0]}
                  for doc_score in result_similarities_top10]

    return result_similarities_top10, result_str


def getSimilarity_top10_from_db(text="", create_time=None):
    """Return the 10 database corpus entries most similar to *text* (TF-IDF cosine).

    Parameters
    ----------
    text : str
        Query sentence; segmented with jieba before scoring.
    create_time : datetime.datetime | None
        Cut-off timestamp passed to the corpus loader; defaults to "now",
        resolved at call time.

    Returns
    -------
    tuple[list, list[dict]]
        ``(top10, top10_dicts)`` — ``[corpus_text, raw_score]`` pairs and the
        same data as ``{'similarity': float, 'text': str}`` dictionaries.
    """
    # BUG FIX: the old default `create_time=datetime.datetime.now()` was
    # evaluated once at import time, so every call without an explicit
    # create_time reused the same stale timestamp. Resolve "now" per call.
    if create_time is None:
        create_time = datetime.datetime.now()

    # Build the TF-IDF model from the database corpus.
    vec_tfidf, corpus, tfidf, dictionary, num_features, cut_word_list = caculatetfidf.calculate_tdidf_from_db(text,
                                                                                                              create_time)

    # gensim sparse-matrix similarity index over the TF-IDF corpus.
    sparse_matrix = similarities.SparseMatrixSimilarity(vec_tfidf, num_features)

    # Segment the query, convert to bag-of-words, then to a TF-IDF vector.
    bow_vector = dictionary.doc2bow(jiebaSegmentationWord.cut(text))
    bow_tfidf = tfidf[bow_vector]

    # Cosine similarity of the query against every corpus document.
    similarities_list = sparse_matrix.get_similarities(bow_tfidf)

    # Sort (index, score) pairs by score, highest first, and keep the top 10.
    sorted_similarities_list = sorted(enumerate(similarities_list), key=lambda x: x[1], reverse=True)
    result_similarities_top10 = [[corpus[i[0]], i[1]] for i in sorted_similarities_list[0:10]]

    # float() keeps the dicts JSON-serializable (gensim scores are numpy float32).
    result_str = [{'similarity': float(doc_score[1]), 'text': doc_score[0]}
                  for doc_score in result_similarities_top10]

    return result_similarities_top10, result_str


def getSimilarity_top10_from_db_extended(text="", create_time=None):
    """Score database corpus entries against *text* with a blended similarity.

    The blend is 70% TF-IDF text similarity + 30% word2vec similarity.

    Parameters
    ----------
    text : str
        Query sentence; segmented with jieba before scoring.
    create_time : datetime.datetime | None
        Cut-off timestamp passed to the corpus loader; defaults to "now",
        resolved at call time.

    Returns
    -------
    list[dict]
        All corpus entries as ``{'similarity', 'text', 'text_similarity',
        'vec_similarity'}`` dicts, sorted by blended 'similarity' descending.
    """
    # BUG FIX: a `datetime.datetime.now()` default argument is evaluated once
    # at import time; resolve the timestamp at call time instead.
    if create_time is None:
        create_time = datetime.datetime.now()

    # Build the TF-IDF model from the database corpus.
    vec_tfidf, word_list, tfidf, dictionary, num_features, cut_word_list = caculatetfidf.calculate_tdidf_from_db(text,
                                                                                                                 create_time)

    # gensim sparse-matrix similarity index over the TF-IDF corpus.
    sparse_matrix = similarities.SparseMatrixSimilarity(vec_tfidf, num_features)

    # Segment the query, convert to bag-of-words, then to a TF-IDF vector.
    bow_vector = dictionary.doc2bow(jiebaSegmentationWord.cut(text))
    bow_tfidf = tfidf[bow_vector]

    # Text (TF-IDF) similarity of the query against every corpus document.
    similarities_list = sparse_matrix.get_similarities(bow_tfidf)
    result_text_similarities = [[word_list[idx], sim] for idx, sim in enumerate(similarities_list)]

    # word2vec similarity; the second return value was immediately overwritten
    # in the original code, so it is discarded here.
    result_vec_similarities, _ = word2vec.get_similarity_word_2_vec_from_list(cut_word_list, text)

    # NOTE(review): the original code uses result_vec_similarities[0][1] — the
    # vec score of the FIRST entry only — for every text, rather than a
    # per-entry score. That looks unintended, but it is preserved as-is pending
    # confirmation of word2vec.get_similarity_word_2_vec_from_list's ordering.
    # (The original also rebuilt the list via a no-op enumerate comprehension,
    # which is dropped here as it changed nothing.)
    vec_score = result_vec_similarities[0][1]

    # Blend: 70% text similarity + 30% word2vec similarity.
    result_str = [
        {'similarity': float(sim) * 0.7 + vec_score * 0.3,
         'text': doc,
         'text_similarity': float(sim),
         'vec_similarity': vec_score}
        for doc, sim in result_text_similarities
    ]

    return sorted(result_str, key=lambda x: x['similarity'], reverse=True)