"""
@author: 石沙
@date: 2020-12-06
@content：本模块用于定义word2vec模型的训练器（WordEmbedding）以及在其基础上的文档向量生成器（WordEmbeddingDoc）
"""
from gensim import models
import sys

sys.path.append(r'/home/user10000630/notespace/Chatbot_227_ef9a')
from site_packages.utils.job import DataOp
import numpy as np
import os
from configs.settings import SAVED_MODEL_PATH
import logging
import os
import configs.settings as conf
import logging
import pickle
import tempfile
from gensim.corpora import Dictionary
from gensim.models import TfidfModel


class WordEmbedding:
    """
    Word-embedding trainer/loader; the backend is selectable between
    Word2Vec and FastText via ``model_type``.
    """
    # Dispatch table: model_type key -> gensim model class.
    _models = {
        'word2vec': models.Word2Vec,
        'fasttext': models.FastText
    }

    def __init__(self, model_type='word2vec', model_name='word2vec', save=True, mode='train', **embedding_params):
        """
        :param model_type: backend to use; must be a key of ``_models``
        :param model_name: file name used to save/load the model under SAVED_MODEL_PATH
        :param save: if True, persist the model right after train()/load
        :param mode: 'train' to fit a new model; 'load' to restore a saved one
        :param embedding_params: keyword arguments forwarded to the gensim
            model constructor; must include 'size' (vector dimensionality)
        :raises ValueError: if ``model_type`` is not a known backend
        :raises KeyError: if 'size' is missing from ``embedding_params``
        """
        if model_type not in self._models:
            # Fail fast instead of a late KeyError inside train().
            raise ValueError('unknown model_type: %r' % (model_type,))
        self.model = None
        self.model_type = model_type
        self.model_name = model_name
        self.mode = mode
        self.embedding_params = embedding_params
        self.vocab = None
        self.save = save
        # Shared random fallback vector returned for out-of-vocabulary words.
        self.unknown = np.random.rand(self.embedding_params['size'])

    def _save(self):
        """Persist the fitted model under SAVED_MODEL_PATH/<model_name>."""
        model_file_path = os.path.join(SAVED_MODEL_PATH, self.model_name)
        self.model.save(model_file_path)
        print('模型已存储于如下路径：', model_file_path)

    def train(self, corpus=None):
        """Fit (mode='train') or restore (mode='load') the embedding model.

        :param corpus: iterable of tokenized texts; required when mode='train'
        :raises ValueError: if mode='train' without a corpus, or ``mode``
            is neither 'train' nor 'load'
        """
        # 1. Fit or load the backend model.
        if self.mode == 'train':
            # ValueError instead of assert: asserts vanish under `python -O`.
            if corpus is None:
                raise ValueError("corpus is required when mode='train'")
            self.model = self._models[self.model_type](corpus, **self.embedding_params)
            print('已训练完成')
        elif self.mode == 'load':
            self.model = self._models[self.model_type].load(os.path.join(SAVED_MODEL_PATH, self.model_name))
            print('已加载完成')
        else:
            # Previously an unknown mode left self.model as None and crashed
            # below with an opaque AttributeError; fail fast with a clear error.
            raise ValueError('unknown mode: %r' % (self.mode,))

        # 2. Cache the vocabulary of the fitted model.
        self.vocab = list(self.model.wv.vocab.keys())

        # 3. Optionally persist the model.
        if self.save:
            self._save()

    def transform_single_text(self, text):
        """Map a tokenized text to a (len(text), size) embedding matrix.

        Out-of-vocabulary words are mapped to ``self.unknown``.
        """

        def _lookup(word):
            try:
                return self.model.wv[word]
            except KeyError:
                # Was a bare `except:` — only an OOV lookup (KeyError) should
                # fall back to the random unknown vector.
                return self.unknown

        return np.array([_lookup(word) for word in text])

    def transform(self, texts):
        """Embed each tokenized text in ``texts``; one matrix per text."""
        return np.array(list(map(self.transform_single_text, texts)))


class WordEmbeddingDoc:
    """Builds fixed-size document vectors by aggregating word embeddings."""

    def __init__(self, model_type='word2vec', model_name='word2vec',
                 method='mean',
                 transformers=None,
                 **embedding_params):
        """
        :param model_type: 'word2vec' or 'fasttext'
        :param model_name: name of the previously saved embedding model to load
        :param method: aggregation over word vectors: 'mean', 'max' or 'min'
        :param transformers: optional sequence of objects exposing
            ``transform(texts)``, applied in order before embedding
        :param embedding_params: parameters of the underlying embedding model;
            must include 'size' (vector dimensionality)
        :raises ValueError: if ``method`` is not one of the supported modes
        """
        # ValueError instead of assert: asserts vanish under `python -O`.
        if method not in ('mean', 'max', 'min'):
            raise ValueError('unknown method: %r' % (method,))

        self.embedding = self.get_model(model_type=model_type, model_name=model_name, **embedding_params)
        self.method = method
        self.embedding_params = embedding_params
        self.transformers = transformers
        # Zero vector returned for empty documents.
        self.blank = [0.0 for i in range(self.embedding_params['size'])]

    def get_model(self, model_type=None, model_name=None, **embedding_params):
        """Load the saved word-embedding model (never retrains, never resaves)."""
        return WordEmbedding(model_type=model_type, model_name=model_name, mode='load', save=False, **embedding_params)

    def get_doc2vec(self, text_embeddings):
        """Aggregate each (n_words, size) matrix into one size-dim vector.

        :param text_embeddings: iterable of per-text embedding matrices
        :return: np.ndarray of shape (n_texts, size)
        """

        methods = {
            'mean': np.mean,
            'max': np.max,
            'min': np.min
        }
        size = self.embedding_params['size']

        def doc2vec(text_embedding):
            # Empty document: nothing to aggregate, use the zero vector.
            if len(text_embedding) == 0:
                return self.blank
            # Single-word document: the aggregate is the word vector itself.
            # Bug fix: the original compared ``shape[0]`` (an int) against a
            # tuple, so this branch could never fire; it now short-circuits
            # correctly and returns a flat size-dim list like the other paths.
            if text_embedding.shape == (1, size):
                return list(text_embedding[0])
            return list(methods[self.method](text_embedding, axis=0))

        return np.array(list(map(doc2vec, text_embeddings)))

    def train(self, X=None):
        """Train (or load, per the embedding's mode) the underlying model."""
        logging.info('开始训练：')

        logging.info('---执行词向量抽取---')
        self.embedding.train(X)

    def transform(self, X):
        """Apply optional transformers, embed, then aggregate to doc vectors."""
        if self.transformers is not None:
            logging.info('---执行过滤---')
            for f in self.transformers:
                X = f.transform(X)

        text_embeddings = self.embedding.transform(X)
        logging.info('---获取文本向量---')
        return self.get_doc2vec(text_embeddings)


class Tfidf:

    """Score words with tf-idf so high-importance words can be selected."""

    def __init__(self, save=True, model_name=None):
        """
        :param save: if True, persist dictionary and model after ``train()``
        :param model_name: when given, ``train()`` loads this previously
            saved model instead of fitting a new one
        """
        self.dic = None             # gensim Dictionary (word id -> token)
        self.model = None           # fitted TfidfModel
        self._filters = None
        self.tfidf_corpus = None    # tf-idf weighted corpus, set by train()
        self._save = save
        self.model_name = model_name

    def save(self, model_name='tf-idf', prefix='gensim-model-'):
        """Persist the dictionary (pickle) and the tf-idf model.

        :param model_name: file name for the tf-idf model under SAVED_MODEL_PATH
        :param prefix: unused; kept for backward compatibility (the previous
            implementation created — and leaked — an unused NamedTemporaryFile
            with this prefix on every call)
        """
        model_filepath = os.path.join(conf.SAVED_MODEL_PATH, model_name)

        # Persist the dictionary.
        dic_filepath = os.path.join(conf.SAVED_MODEL_PATH, 'tf-idf-dic.pkl')
        with open(dic_filepath, 'wb') as f:
            pickle.dump(self.dic, f)
        # Bug fix: logging uses lazy %-formatting; the previous comma-passed
        # path had no matching %s placeholder and was never rendered.
        logging.info('字典已存储于如下路径：%s', dic_filepath)

        # Persist the tf-idf model directly (no temp-file detour).
        self.model.save(model_filepath)
        logging.info('模型已存储于如下路径：%s', model_filepath)

    def load(self, model_name='tf-idf'):
        """Load the pickled dictionary and saved tf-idf model.

        :return: (model, dic) tuple
        """
        logging.info('正在加载模型...')
        # NOTE: pickle.load must only be used on files we wrote ourselves.
        with open(os.path.join(conf.SAVED_MODEL_PATH, 'tf-idf-dic.pkl'), 'rb') as f:
            dic = pickle.load(f)
        model = TfidfModel.load(os.path.join(conf.SAVED_MODEL_PATH, model_name))
        logging.info('已加载完成')
        return model, dic

    def idx2token(self, tfidf_corpus):
        """Convert [(word_id, score), ...] docs into {token: score} dicts."""
        return [dict((self.dic[idx], score) for idx, score in doc) for doc in tfidf_corpus]

    def train(self, corpus):
        """Fit (or load) the dictionary and tf-idf model, then weight ``corpus``.

        :param corpus: list of tokenized texts
        """
        # Load an existing model or fit a fresh dictionary + model.
        if self.model_name is not None:
            self.model, self.dic = self.load(model_name=self.model_name)
            bow_corpus = [self.dic.doc2bow(text) for text in corpus]
        else:
            self.dic = Dictionary(corpus)
            bow_corpus = [self.dic.doc2bow(text) for text in corpus]
            self.model = TfidfModel(bow_corpus)
        if self._save:
            self.save()

        # Tf-idf weighted view of the bag-of-words corpus.
        self.tfidf_corpus = self.model[bow_corpus]


if __name__ == '__main__':
    # One-off script: fit a word2vec model on the training corpus and
    # persist it (WordEmbedding saves by default) for downstream reuse.
    embedding_params = {
        'min_count': 2,
        'size': 200,
        'workers': 4,
        'iter': 15,
    }

    train_data = DataOp.load_data('train')
    train_data.head()
    embedder = WordEmbedding(mode='train', **embedding_params)
    embedder.train(train_data['customer_speech_list'].tolist())