import numpy as np
from scipy import spatial
import jieba
from gensim.models import word2vec,KeyedVectors
from scipy.linalg import norm

# Build a sentence vector by averaging the word vectors of every token in
# the sentence; tokens missing from the model's vocabulary are skipped.
# sentence: iterable of tokens; size: embedding dimensionality;
# w2v_model: trained word-vector model supporting model[word] lookup.
def build_sentence_vector(sentence, size, w2v_model):
    total = np.zeros((1, size))
    hits = 0
    for token in sentence:
        try:
            total += w2v_model[token].reshape((1, size))
        except KeyError:
            # out-of-vocabulary token: skip it
            continue
        hits += 1
    # Average over the tokens actually found; all-OOV input stays all-zero.
    return total / hits if hits else total

# Cosine similarity of two sentence vectors.
def cosine_similarity(vec1, vec2):
    """Return the cosine similarity of two vectors as a Python float.

    Inputs of any shape are flattened first. The previous version used
    the builtin ``sum`` on the norms: for the (1, size) arrays produced
    by ``build_sentence_vector``, ``sum(a ** 2)`` iterates over axis 0
    and yields a length-``size`` array, so ``float(cos21 * cos22)``
    raised TypeError for any size > 1. Dot products on flattened arrays
    fix that while leaving 1-D behavior unchanged.
    """
    a = np.asarray(vec1, dtype=float).ravel()
    b = np.asarray(vec2, dtype=float).ravel()
    # Division by a zero norm still yields nan/inf, as the original did.
    denom = np.sqrt(np.dot(a, a)) * np.sqrt(np.dot(b, b))
    return float(np.dot(a, b) / denom)

# Given two (tokenized) sentences, compute their cosine similarity using
# the 300-dim wiki word2vec model.
def compute_cosine_similarity(sents_1, sents_2):
    """Return the cosine similarity of two sentences' averaged word vectors.

    The word2vec model is large, so it is loaded from disk once and
    cached on the function object; the original reloaded it on every
    call, which dominated the runtime.
    """
    size = 300
    model = getattr(compute_cosine_similarity, "_w2v_model", None)
    if model is None:
        model = KeyedVectors.load_word2vec_format('model/wiki_cn_vec_300.bin')
        compute_cosine_similarity._w2v_model = model
    vec1 = build_sentence_vector(sents_1, size, model)
    vec2 = build_sentence_vector(sents_2, size, model)
    return cosine_similarity(vec1, vec2)

def vector_similarity(s1, s2, size=64):
    """Cosine similarity between two Chinese sentences.

    Tokenizes each sentence with jieba and averages the word vectors
    looked up in the module-level ``model`` (loaded in ``__main__``;
    presumably 64-dim to match the default ``size`` — confirm against
    the model file).

    Fixes over the original:
    - an out-of-vocabulary word no longer raises KeyError; it is
      skipped, matching ``build_sentence_vector``'s behavior;
    - an empty or all-OOV sentence no longer divides by zero (returns
      0.0 instead of nan);
    - the hard-coded dimension 64 is now the ``size`` parameter
      (backward-compatible default).
    """
    def sentence_vector(s):
        # Average the vectors of the in-vocabulary tokens of ``s``.
        words = jieba.lcut(s)
        v = np.zeros(size)
        count = 0
        for word in words:
            try:
                v += model[word]
            except KeyError:
                # skip out-of-vocabulary tokens instead of crashing
                continue
            count += 1
        if count:
            v /= count
        return v

    v1, v2 = sentence_vector(s1), sentence_vector(s2)
    denom = norm(v1) * norm(v2)
    if denom == 0:
        # a zero vector carries no information: define similarity as 0
        return 0.0
    return np.dot(v1, v2) / denom

if __name__ == '__main__':
    # Demo: compare two Chinese sentences with the 64-dim wiki vectors.
    sentence_a = "邮箱格式怎么写"
    sentence_b = "你在做什么"
    # NOTE: `model` must be this module-level name — vector_similarity
    # reads it as a global.
    model = KeyedVectors.load_word2vec_format('model/wiki_cn_vec_64.bin')
    print(vector_similarity(sentence_a, sentence_b))