import numpy as np
import db.db_init as db
import similarity.jiebaSegmentationWord as jiebaSegmentationWord
from gensim.models import word2vec
import common.invertedIndex as invertedIndex
import datetime


def similarity_word_2_vec(text="", createTime=None, fname="model/word2vec_model.vec"):
    """Train a Word2Vec model over the corpus matching *text* and return
    per-document mean word vectors.

    :param text: query text used to pre-filter the corpus via the inverted index
    :param createTime: timestamp passed through to db.get_corpus_from_db;
        defaults to "now", evaluated per call
    :param fname: reserved path for model caching (currently unused)
    :return: (word2vec_lib, model, corpus) — a list of per-document mean
        vectors, the trained gensim model, and the filtered corpus
    """
    # BUG FIX: the original default was `createTime=datetime.datetime.now()`,
    # which Python evaluates ONCE at import time, so every call silently
    # reused the same stale timestamp. Use a None sentinel instead.
    if createTime is None:
        createTime = datetime.datetime.now()

    # Fetch the corpus — rows containing a data id and the text ("content").
    corpus = db.get_corpus_from_db(createTime)

    # Narrow the corpus to documents sharing terms with the query text.
    corpus = invertedIndex.query_by_field_id(text, corpus, "content")

    # Segment each document into a word list.
    words_list = [jiebaSegmentationWord.cut(corpu["content"]) for corpu in corpus]

    # Hyper-parameter tuning notes (max/min cosine on a validation pair):
    # hs, sg:
    #   0,0 -> max 0.9289 (accurate)   min 0.9107
    #   0,1 -> max 0.9990 (inaccurate) min 0.9989
    #   1,0 -> max 0.9890 (inaccurate) min 0.9815
    #   1,1 -> max 0.9673 (inaccurate) min 0.9604
    # hs, sg = 0, 0 -> window:
    #   5 -> max 0.9981 (inaccurate) min 0.9976
    #   1 -> max 0.7473 (accurate)   min 0.6715
    #   2 -> max 0.9266 (accurate)   min 0.9092
    # hs, sg = 0, 0, window = 1 -> vector_size:
    #   300 -> max 0.7996 (accurate) min 0.7383
    #   400 -> max 0.7594 (accurate) min 0.6836
    #   500 -> max 0.8013 (accurate) min 0.7284
    # NOTE(review): loading/saving the model via `fname` was commented out
    # upstream; the model is retrained from scratch on every call.
    model = word2vec.Word2Vec(words_list, vector_size=400, window=1,
                              min_count=1, hs=0, sg=0, seed=42, workers=10)
    print(len(model.wv))

    word2vec_lib = []
    for words in words_list:
        # Document vector = element-wise mean of its in-vocabulary word
        # vectors. NOTE(review): if no word is in the vocabulary this is the
        # mean of an empty array and yields NaN — confirm upstream filtering
        # guarantees non-empty segmentations.
        w_vec = np.array([model.wv[word] for word in words if word in model.wv])
        w_vec = np.mean(w_vec, axis=0)
        word2vec_lib.append(w_vec)
    return word2vec_lib, model, corpus


def normalize_vector(vec):
    """Scale *vec* to unit Euclidean length.

    :param vec: input vector
    :return: vec / ||vec||, or vec unchanged when its norm is zero
    """
    length = np.linalg.norm(vec)
    # A zero vector cannot be normalized — return it as-is.
    return vec if length == 0 else vec / length


def cos_distance(vec1, vec2):
    """Cosine similarity between two vectors.

    :param vec1: first vector (any sequence convertible to a numpy array)
    :param vec2: second vector, same length as vec1
    :return: cosine similarity in [-1, 1]; 0.0 when either vector has zero
        norm (the original formula divided by zero there and produced NaN
        with a RuntimeWarning)
    """
    vec1 = np.asarray(vec1, dtype=float)
    vec2 = np.asarray(vec2, dtype=float)

    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    # BUG FIX: guard the zero-norm case instead of dividing by zero.
    if denom == 0:
        return 0.0
    return float(np.dot(vec1, vec2) / denom)


def calculate_similarity_by_word2vec(text2vec, word2vec_lib, model):
    """Cosine similarity between *text2vec* and every document vector.

    :param text2vec: mean vector of the query text
    :param word2vec_lib: list of per-document mean vectors
    :param model: unused; kept for interface compatibility with callers
    :return: list of cosine similarities, aligned with word2vec_lib
    """
    # BUG FIX: the original loop variable was named `word2vec`, shadowing the
    # gensim `word2vec` module imported at file level — a latent name clash.
    return [cos_distance(text2vec, doc_vec) for doc_vec in word2vec_lib]


def get_word2vec(model, text=""):
    """Segment *text* with jieba and return the mean Word2Vec vector of its
    in-vocabulary words.

    :param model: a trained gensim Word2Vec model (``model.wv`` is queried)
    :param text: text to embed
    :return: element-wise mean of the word vectors

    NOTE(review): if no segmented word is in the model vocabulary this
    averages an empty array and yields NaN — confirm callers never pass
    fully out-of-vocabulary text.
    """
    tokens = jiebaSegmentationWord.cut(text)
    vectors = np.array([model.wv[token] for token in tokens if token in model.wv])
    return np.mean(vectors, axis=0)


def get_similarity_word_2_vec(text=""):
    """Rank the database corpus by similarity to *text* and return the top 10.

    :param text: query text
    :return: (top10, top10_dicts) — top10 is [[corpus_row, score], ...]
        sorted by descending score; top10_dicts wraps each pair as
        {'similarity': float, 'text': corpus_row}
    """
    word2vec_lib, model, corpus = similarity_word_2_vec(text)
    query_vec = get_word2vec(model, text)

    # Compare the query vector against every document vector.
    scores = calculate_similarity_by_word2vec(query_vec, word2vec_lib, model)

    # Sort (index, score) pairs by score, best first, and keep the top 10.
    ranked = sorted(enumerate(scores), key=lambda pair: pair[1], reverse=True)
    top10 = [[corpus[idx], score] for idx, score in ranked[:10]]

    top10_dicts = [{'similarity': float(score), 'text': row} for row, score in top10]
    return top10, top10_dicts


# Preprocess an already-segmented corpus into document vectors.
def prepare_data_from_list(cut_corpus):
    """Train Word2Vec on *cut_corpus* and build one mean vector per document.

    :param cut_corpus: list of word lists (documents already segmented)
    :return: (doc_vectors, model, cut_corpus)
    """
    model = word2vec.Word2Vec(cut_corpus, vector_size=400, window=1,
                              min_count=1, hs=0, sg=0, seed=42, workers=10)
    print(len(model.wv))

    doc_vectors = []
    for tokens in cut_corpus:
        # Document vector = element-wise mean of its in-vocabulary word vectors.
        token_vecs = np.array([model.wv[t] for t in tokens if t in model.wv])
        doc_vectors.append(np.mean(token_vecs, axis=0))
    return doc_vectors, model, cut_corpus


# Compute text similarity against a supplied (pre-segmented) corpus.
# Inputs: segmented corpus, query text.
def get_similarity_word_2_vec_from_list(cut_corpus, text="") -> object:
    """Score *text* against every document in *cut_corpus*.

    :param cut_corpus: list of word lists (documents already segmented)
    :param text: query text
    :return: (pairs, dicts) — pairs is [[document, score], ...] in corpus
        order (unsorted); dicts wraps each as
        {'similarity': float, 'text': document}
    """
    doc_vectors, model, corpus = prepare_data_from_list(cut_corpus)
    query_vec = get_word2vec(model, text)

    # Compare the query vector with each document vector.
    scores = calculate_similarity_by_word2vec(query_vec, doc_vectors, model)

    # Pair each document with its score, preserving corpus order.
    pairs = [[corpus[idx], score] for idx, score in enumerate(scores)]
    dicts = [{'similarity': float(score), 'text': doc} for doc, score in pairs]
    return pairs, dicts

# str1 = "顺风小区修枯树有安全隐患"
# str2 = "来话人反映1月9日10：33双流区红樱路的下水道堵塞导致道路上溢出了很多污水，市民要求整治。"
#
# str1_words_list = [[jiebaSegmentationWord.cut(str1)]]
# str2_words_list = [[jiebaSegmentationWord.cut(str2)]]
#
# print(cos_distance(str1_words_list, str2_words_list))
