# -*- coding:utf8 -*-
# @Time : 2022/12/5 6:15 下午
# @Author : WanJie Wu

import _pickle as pickle
from abc import ABC, abstractmethod

import fasttext
import numpy as np
from gensim.models import Word2Vec


def gen_sentence_vec_by_word(feature):
    """Pool per-word vectors into a single sentence vector.

    Sums the word vectors along axis 0 and divides by the Frobenius
    norm of the whole word-vector matrix.

    :param feature: 2-D array of shape (num_words, dim).
    :return: 1-D array of shape (dim,); the raw (all-zero) sum is
        returned unscaled when the norm is 0, avoiding division by zero.
    """
    vec_sum = feature.sum(axis=0)
    norm = np.sqrt((feature ** 2).sum())
    # Guard: all-zero input (e.g. every token was OOV) has norm 0.
    if not norm:
        return vec_sum
    # Fix: reuse the norm computed above instead of recomputing it.
    return vec_sum / norm


class Vector(ABC):
    """Abstract base class for sentence-embedding backends.

    Fix: the original inherited from ``object``, so ``@abstractmethod``
    had no effect — subclasses could be instantiated without overriding
    ``sentence_vec``. Inheriting ``abc.ABC`` enforces the contract.
    """

    @abstractmethod
    def sentence_vec(self, sentences):
        """Return one embedding vector per tokenized sentence.

        :param sentences: iterable of token lists.
        :return: 2-D array-like, one row per sentence.
        """
        raise NotImplementedError


class TfidfVec(Vector):
    """Sentence vectorizer backed by a pickled, fitted TF-IDF model."""

    def __init__(self, model_path):
        # Load the fitted TF-IDF vectorizer from disk.
        with open(model_path, "rb") as f:
            self.model = pickle.load(f)

    def sentence_vec(self, sentences):
        """Extract sentence features; returns a dense TF-IDF matrix."""
        docs = [" ".join(tokens) for tokens in sentences]
        return self.model.transform(docs).toarray()

    def words_tfidf(self, words):
        """Return ``{word: tfidf}`` for the given token list.

        Words with a zero weight are omitted from the result.
        """
        weights = self.model.transform([" ".join(words)]).toarray()[0]
        vocab = self.model.get_feature_names_out()
        return {word: weights[idx] for idx, word in enumerate(vocab) if weights[idx]}


class TfidfSVDVec(Vector):
    """Sentence vectorizer: TF-IDF features reduced with a fitted SVD model."""

    def __init__(self, tfidf_path, svd_path):
        # Both models are pre-fitted and pickled to disk.
        with open(tfidf_path, "rb") as tfidf_file:
            self.model_t = pickle.load(tfidf_file)
        with open(svd_path, "rb") as svd_file:
            self.model_v = pickle.load(svd_file)

    def sentence_vec(self, sentences):
        """Extract sentence features: dense TF-IDF, then SVD projection."""
        docs = [" ".join(tokens) for tokens in sentences]
        tfidf_features = self.model_t.transform(docs)
        return self.model_v.transform(tfidf_features.toarray())


class Word2VecPred(Vector):
    """Sentence vectorizer backed by a trained gensim Word2Vec model."""

    def __init__(self, model_path):
        self.model = Word2Vec.load(model_path)

    def word_vec(self, words):
        """Map each token to its embedding.

        OOV tokens fall back to an all-zero vector; an empty token list
        yields a single zero vector so the result is never empty.

        :param words: list of tokens for one sentence.
        :return: array of shape (max(len(words), 1), vector_size).
        """
        dim = self.model.wv.vector_size
        vectors = []
        for token in words:
            try:
                vectors.append(self.model.wv[token])
            except KeyError:
                # Token absent from the vocabulary.
                vectors.append(np.zeros(dim))
        if not vectors:
            vectors.append(np.zeros(dim))
        return np.array(vectors)

    def sentence_vec(self, sentences):
        """Return one pooled embedding per tokenized sentence."""
        return np.array([
            gen_sentence_vec_by_word(self.word_vec(tokens))
            for tokens in sentences
        ])


class FastTextVec(Vector):
    """Sentence vectorizer backed by a trained fastText model."""

    def __init__(self, model_path):
        self.model = fasttext.load_model(model_path)

    def word_vec(self, words):
        """Map each token to its fastText embedding.

        NOTE(review): fastText composes vectors from character n-grams,
        so ``self.model[word]`` does not raise KeyError for OOV tokens;
        the except branch is kept only as a defensive fallback.

        Fix: an empty token list now yields a single zero vector
        (consistent with ``Word2VecPred.word_vec``) instead of an empty
        array, which produced an ill-shaped result downstream.

        :param words: list of tokens for one sentence.
        :return: array of shape (max(len(words), 1), dim).
        """
        dim = self.model.get_dimension()
        feature = []
        for word in words:
            try:
                feature.append(self.model[word])
            except KeyError:
                feature.append(np.zeros(dim))
        if not words:
            # Keep shape (1, dim) so sentence pooling stays well-defined.
            feature.append(np.zeros(dim))
        return np.array(feature)

    def sentence_vec(self, sentences):
        """Return one pooled embedding per tokenized sentence."""
        features = []
        for words in sentences:
            features.append(gen_sentence_vec_by_word(self.word_vec(words)))
        return np.array(features)


if __name__ == "__main__":
    # Smoke test: segment one review, embed it, print the result shape.
    texts = [
        "冰箱高端 大气， 质量好，店家服务也很好",
    ]
    from app.src.embedding import nltk
    segmenter = nltk.SegmentJB()
    vec_model = Word2VecPred("/data/output/word2vec.bin")
    sentence_lst = [segmenter.ss_segment(text) for text in texts]
    print(vec_model.sentence_vec(sentence_lst).shape)
