#-*-coding:utf-8-*-
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity


class SIF_similarity:
    """Sentence similarity via SIF (Smooth Inverse Frequency) embeddings.

    Implements Arora et al. (ICLR 2017), "A Simple but Tough-to-Beat
    Baseline for Sentence Embeddings": a sentence embedding is the
    frequency-weighted average of its word vectors, optionally with the
    top principal component(s) of the embedding matrix removed.
    """

    def __init__(self, word_freq, word_embedding, a=0.001, rmpc=1):
        # word_freq: dict mapping word index -> corpus frequency count
        # word_embedding: (vocab_size, dim) array, rows indexed by word index
        # a: SIF smoothing parameter (paper recommends ~1e-3 .. 1e-4)
        # rmpc: number of principal components to remove (0 disables removal)
        self.word_freq = word_freq
        self.word_embedding = word_embedding
        self.a = a
        self.rmpc = rmpc

    def sen_embedding(self, sen_list):
        """Return an (len(sen_list), dim) array of SIF sentence embeddings.

        Each sentence is a sequence of integer word indices. Indices
        outside the embedding table are skipped. Words missing from
        ``word_freq`` get estimated frequency 0, hence the maximum
        weight a/(a+0) = 1 (matches the reference SIF implementation,
        where the original code raised KeyError instead).
        """
        total_freq = sum(self.word_freq.values())
        embedding = np.zeros((len(sen_list), self.word_embedding.shape[1]))
        for i, sen in enumerate(sen_list):
            if not sen:
                # Empty sentence -> zero vector.
                continue
            inv_len = 1.0 / len(sen)  # hoisted loop-invariant
            for word in sen:
                if 0 <= word < self.word_embedding.shape[0]:
                    # pw: estimated unigram probability of the word.
                    pw = self.word_freq.get(word, 0) / total_freq
                    embedding[i] += (self.a / (self.a + pw)) * self.word_embedding[word] * inv_len

        if self.rmpc > 0:
            embedding = self.remove_pc(embedding)
        return embedding

    def remove_pc(self, sen_embedding):
        """Remove the projection onto the top ``rmpc`` principal components.

        Bug fix: the original always computed a single component
        (``n_components=1``) even when ``rmpc > 1``; the number of
        components must equal ``rmpc``.
        """
        svd = TruncatedSVD(n_components=self.rmpc, n_iter=7, random_state=0)
        svd.fit(sen_embedding)
        pc = svd.components_  # shape (rmpc, dim)
        # x - (x @ pc.T) @ pc subtracts the span of pc for any rmpc >= 1;
        # the original's rmpc == 1 special case computed the same value.
        return sen_embedding - sen_embedding.dot(pc.transpose()).dot(pc)

    def fit(self, sen1_list, sen2_list):
        """Return per-pair cosine similarities between aligned sentence lists.

        Raises:
            ValueError: if the two lists have different lengths.
        """
        if len(sen1_list) != len(sen2_list):
            raise ValueError("The length of sentence list is not equal. ")
        n = len(sen1_list)
        # Embed both lists together so PC removal sees all sentences.
        embedding = self.sen_embedding(sen1_list + sen2_list)
        return [
            cosine_similarity([embedding[i]], [embedding[n + i]])[0][0]
            for i in range(n)
        ]
