"""
author：石沙
date：2020-09-28
content：本模块用进行词向量特征提取任务
"""

import os
import numpy as np
from gensim import models
import tempfile
from collections import Counter
import logging
from gensim.corpora import Dictionary
import pickle
from settings import SAVED_MODEL_PATH
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from time import time
import jieba
from site_packages.utils.math import softmax


class WordEmbedding:
    """
    Word-level embedding trainer/loader; supports Word2Vec and FastText.
    """
    # Supported embedding backends, keyed by model_name.
    _models = {
        'word2vec': models.Word2Vec,
        'fasttext': models.FastText
    }

    def __init__(self, model_name='word2vec', save=True, mode='train', **embedding_params):
        """
        :param model_name: one of the keys of ``_models``.
        :param save: if True, persist the model after training/loading.
        :param mode: 'load' to load a saved model; 'train' to train a new one.
        :param embedding_params: keyword arguments forwarded to the gensim
            model; must contain 'size' (the embedding dimensionality).
        """
        self.model = None
        self.model_name = model_name
        self.mode = mode
        self.embedding_params = embedding_params
        self.vocab = None
        self.save = save
        # Shared random fallback vector for out-of-vocabulary words.
        self.unknown = np.random.rand(self.embedding_params['size'])

    def _save(self):
        """Persist the trained model under SAVED_MODEL_PATH."""
        model_file_path = os.path.join(SAVED_MODEL_PATH, self.model_name)
        self.model.save(model_file_path)
        print('模型已存储于如下路径：', model_file_path)

    def train(self, corpus=None):
        """Train on ``corpus`` (or load a saved model), then cache the vocabulary."""
        # 1. Train or load the underlying gensim model.
        if self.mode == 'train':
            assert corpus is not None
            self.model = self._models[self.model_name](corpus, **self.embedding_params)
            print('已训练完成')
        elif self.mode == 'load':
            self.model = self._models[self.model_name].load(os.path.join(SAVED_MODEL_PATH, self.model_name))
            print('已加载完成')

        # 2. Cache the model vocabulary.
        self.vocab = list(self.model.wv.vocab.keys())

        # 3. Optionally persist the model.
        if self.save:
            self._save()

    def transform_single_text(self, text):
        """Map a token sequence to a (n_words, size) array of word vectors.

        Out-of-vocabulary tokens get the shared random ``unknown`` vector.
        """
        def _vector(word):
            # gensim's wv lookup raises KeyError for OOV words; fall back
            # to the shared random vector instead of aborting.
            try:
                return self.model.wv[word]
            except KeyError:
                return self.unknown

        return np.array([_vector(word) for word in text])

    def transform(self, texts):
        """Vectorize a list of token sequences."""
        return np.array(list(map(self.transform_single_text, texts)))


class UncommonWordsFilter:

    """Removes words whose total corpus-wide frequency is below ``min_count``."""

    def __init__(self, min_count):
        # Words occurring fewer than this many times across the whole corpus
        # are dropped from every text.
        self.min_count = min_count

    def flatten(self, list_2d):
        """Flatten a list of token lists into one flat token list."""
        return [token for sub in list_2d for token in sub]

    def transform(self, X):
        """Return X with every corpus-wide rare word removed from each text.

        :param X: list of token lists.
        :return: new list of token lists, preserving order.
        """
        counts = Counter(self.flatten(X))
        # Build the rare-word set once so each membership test is O(1).
        rare = {word for word, count in counts.items() if count < self.min_count}
        return [[word for word in text if word not in rare] for text in X]


class TfidfFilter:

    """Keeps only the highest-scoring words of each text according to TF-IDF."""

    def __init__(self, filter_method='by_order', limit=20, save=False, model_name=None):
        """
        :param filter_method: filtering strategy; only 'by_order' is implemented
            (``transform`` returns None for any other value).
        :param limit: keep at most the top-``limit`` words per text.
        :param save: whether to persist the dictionary and TF-IDF model.
        :param model_name: name of a previously saved model to load; None
            trains a new model from the corpus.
        """
        self.filter_method = filter_method
        self.dic = None            # gensim Dictionary: token id -> token
        self.model = None          # gensim TfidfModel
        self._filters = None       # per-text Series of top-``limit`` tfidf scores
        self.tfidf_corpus = None
        self.limit = limit
        self._save = save
        self.model_name = model_name

    def save(self, model_name='tf-idf', prefix='gensim-model-'):
        """Persist the dictionary (pickle) and TF-IDF model under SAVED_MODEL_PATH.

        ``prefix`` is kept for backward compatibility; it is no longer used.
        """
        model_filepath = os.path.join(SAVED_MODEL_PATH, model_name)

        # Save the dictionary.
        dic_filepath = os.path.join(SAVED_MODEL_PATH, 'tf-idf-dic.pkl')
        with open(dic_filepath, 'wb') as f:
            pickle.dump(self.dic, f)
        # Use a %s placeholder: the original passed the path as a positional
        # logging arg with no placeholder, which raises a formatting error
        # inside the logging machinery.
        logging.info('字典已存储于如下路径：%s', dic_filepath)

        # Save the tf-idf model. The original wrapped this in an unused
        # NamedTemporaryFile(delete=False), leaking one temp file per save.
        self.model.save(model_filepath)
        logging.info('模型已存储于如下路径：%s', model_filepath)

    def load(self, model_name='tf-idf'):
        """Load and return (TfidfModel, Dictionary) previously saved by ``save``."""
        logging.info('正在加载模型...')
        with open(os.path.join(SAVED_MODEL_PATH, 'tf-idf-dic.pkl'), 'rb') as f:
            dic = pickle.load(f)
        model = models.TfidfModel.load(os.path.join(SAVED_MODEL_PATH, model_name))
        logging.info('已加载完成')
        return model, dic

    def idx2token(self, tfidf_corpus):
        """Convert each text's (token_id, score) pairs into a {token: score} dict."""
        return list(map(lambda s: dict([(self.dic[w[0]], w[1]) for w in s]), tfidf_corpus))

    def add_filters(self, tfidf_corpus):
        """Return, per text, a Series of its top-``limit`` tokens by tfidf score."""
        tfidf_corpus = self.idx2token(tfidf_corpus)
        return list(map(lambda s: pd.Series(s).sort_values(ascending=False).head(self.limit), tfidf_corpus))

    def train(self, corpus):
        """Fit (or load) the TF-IDF model and precompute the per-text keep-lists.

        :param corpus: list of token lists.
        """
        # Load an existing model, or train a new one from the corpus.
        if self.model_name is not None:
            self.model, self.dic = self.load(model_name=self.model_name)
            bow_corpus = list(map(lambda text: self.dic.doc2bow(text), corpus))
        else:
            self.dic = Dictionary(corpus)
            bow_corpus = list(map(lambda text: self.dic.doc2bow(text), corpus))
            self.model = models.TfidfModel(bow_corpus)
        if self._save:
            self.save()

        # Score the corpus and keep only the top-scoring words per text.
        self.tfidf_corpus = self.model[bow_corpus]
        self._filters = self.add_filters(self.tfidf_corpus)

    def transform(self, X):
        """Filter each text of X down to its precomputed top-tfidf words."""
        def _f(line, f):
            return list(filter(lambda w: w in f.index, line))

        if self.filter_method == 'by_order':
            return list(map(_f, X, self._filters))


class WordEmbeddingDoc:

    """Turns token sequences into fixed-size document vectors by pooling word vectors."""

    def __init__(self, model_name='word2vec',
                 method='mean',
                 transformers=None,
                 **embedding_params):
        """
        :param model_name: 'word2vec' or 'fasttext'.
        :param method: pooling used to collapse the word vectors of one text
            into a single document vector: 'mean', 'max' or 'min'.
        :param transformers: optional sequence of objects exposing
            ``transform(X)``, applied in order to the token lists first.
        :param embedding_params: keyword arguments for the embedding model;
            must contain 'size'.
        """
        assert method in ['mean', 'max', 'min']

        self.embedding = self.get_model(model_name=model_name, **embedding_params)
        self.method = method
        self.embedding_params = embedding_params
        self.transformers = transformers

    def get_model(self, model_name, **embedding_params):
        """Build the underlying WordEmbedding in load mode without saving."""
        return WordEmbedding(model_name=model_name, mode='load', save=False, **embedding_params)

    def _apply_transformers(self, X):
        """Run every configured transformer over X in order; no-op when None."""
        if self.transformers is not None:
            logging.info('---执行过滤---')
            for f in self.transformers:
                X = f.transform(X)
        return X

    def get_doc2vec(self, text_embeddings):
        """Pool each (n_words, size) embedding matrix into one document vector."""
        methods = {
            'mean': np.mean,
            'max': np.max,
            'min': np.min
        }

        def doc2vec(text_embedding):
            # NOTE: the original compared ``shape[0]`` (an int) to a tuple,
            # so its single-row early return could never trigger; pooling a
            # single row along axis=0 yields that row anyway, so the dead
            # branch was removed.
            return list(methods[self.method](text_embedding, axis=0))

        return np.array(list(map(doc2vec, text_embeddings)))

    def train(self, X):
        """Train the word embedding on X and return its pooled document vectors."""
        logging.info('开始训练：')

        logging.info('---执行词向量抽取---')
        self.embedding.train(X)

        X = self._apply_transformers(X)

        text_embeddings = self.embedding.transform(X)
        logging.info('---获取文本向量---')
        return self.get_doc2vec(text_embeddings)

    def transform(self, X):
        """Return pooled document vectors for X using the trained embedding.

        Bug fix: the original tested ``self.transform`` (a bound method, always
        truthy) instead of ``self.transformers``, crashing when transformers
        was None.
        """
        X = self._apply_transformers(X)

        text_embeddings = self.embedding.transform(X)
        logging.info('---获取文本向量---')
        return self.get_doc2vec(text_embeddings)


class NgramEmbedding:

    """Document vectors built by embedding and pooling n-gram tokens."""

    def __init__(self, ngram=2, model_name='word2vec', method='mean', mode='train', detect=False, **embedding_params):
        """
        :param ngram: n-gram window size.
        :param model_name: which embedding model class to use.
        :param method: 'mean' for mean-pooling, anything else for max-pooling.
        :param mode: 'train' to fit the n-gram vectorizer; 'load' to load a
            previously pickled one.
        :param detect: if True, automatically switch to 'load' when a saved
            vectorizer for this ngram size already exists on disk.
        :param embedding_params: keyword arguments for the embedding model;
            must contain 'size'.
        """
        self.vector = CountVectorizer(min_df=1, ngram_range=(ngram, ngram))
        self.ngram = ngram
        if detect:
            self.mode = (
                'load'
                 if os.path.exists(os.path.join(SAVED_MODEL_PATH, 'ngram{}.pkl'.format(self.ngram)))
                 else mode
            )
        else:
            self.mode = mode
        self.embedding = WordEmbedding(model_name=model_name, save=False, mode='load', **embedding_params)
        self.embedding_params = embedding_params
        self.vocab = None
        self.method = method
        # Fallback used when a document produced no n-grams.
        # NOTE(review): this is a (3, size) float array, not tokens; after
        # flatten it only reshapes cleanly in text2embedding for some ngram
        # values — confirm intended behavior.
        self.unknown = np.random.rand(3, embedding_params['size'])
        self.embedding_vocab = None

    def fit_transform(self, X):
        """Fit the n-gram vectorizer on X and return its document-term matrix."""
        return self.vector.fit_transform(X)

    def text2embedding(self, text):
        """Embed a flat n-gram word sequence and pool it into one vector.

        Each group of ``ngram`` consecutive word vectors is concatenated into
        one row before pooling.
        """
        line = self.embedding.transform_single_text(text).reshape(-1, self.ngram * self.embedding_params['size'])
        if self.method == 'mean':
            return np.mean(line, axis=0)
        else:
            return np.max(line, axis=0)

    def inverse_transform(self, X_transformed):
        """Yield, per document, the flattened word sequence of its non-zero n-grams."""
        for line in X_transformed:
            words = list(map(lambda w: self.idx2token[w].split(), line.indices))
            if len(words) == 0:
                words = self.unknown
            yield np.array(words).flatten()

    def list2string(self, X):
        """Join each token list into one whitespace-separated string."""
        return list(map(lambda line: ' '.join(line), X))

    def train(self, X=None):
        """Fit (or load) the n-gram vectorizer and return pooled embeddings for X."""
        start = time()
        X = self.list2string(X)
        if self.mode == 'train':
            X_transformed = self.vector.fit_transform(X)
            with open(os.path.join(SAVED_MODEL_PATH, 'ngram{}.pkl'.format(self.ngram)), 'wb') as f:
                pickle.dump(self.vector, f)
            print('模型已保存至{}'.format(SAVED_MODEL_PATH))
        else:
            with open(os.path.join(SAVED_MODEL_PATH, 'ngram{}.pkl'.format(self.ngram)), 'rb') as f:
                self.vector = pickle.load(f)
            # Bug fix: the original called fit_transform here, refitting the
            # vectorizer and discarding the vocabulary just loaded from disk.
            X_transformed = self.vector.transform(X)
            print('模型已加载')

        print('第一段', time() - start)
        start = time()
        # Build an id -> n-gram token lookup from the fitted vocabulary.
        tokens, idx = self.vector.vocabulary_.keys(), self.vector.vocabulary_.values()
        self.idx2token = dict(zip(idx, tokens))
        X_words = self.inverse_transform(X_transformed)
        print('第二段', time() - start)
        start = time()
        self.embedding.train()
        result = np.array(list(map(self.text2embedding, X_words)))
        print('第三段', time() - start)
        return result

    def transform(self, X):
        """Return pooled n-gram embeddings for X using the already-fitted vectorizer."""
        X = self.list2string(X)
        X_transformed = self.vector.transform(X)
        X_words = self.inverse_transform(X_transformed)
        return np.array(list(map(lambda line: self.text2embedding(line), X_words)))


class AttentionEmbedding:

    """Applies a label-driven attention mechanism to word-level features."""

    def __init__(self, label_dict, model_name='word2vec', method='mean', **embedding_params):
        """
        :param label_dict: mapping from label text to label index.
        :param model_name: which embedding model class to use.
        :param method: 'mean' for mean-pooling of the weighted word vectors,
            anything else for max-pooling.
        :param embedding_params: keyword arguments for the embedding model.
        """
        self.label_dict = label_dict
        self.method = method
        self.label_cut_dict = None
        self.embedding = self.get_model(model_name=model_name, mode='load', save=False, **embedding_params)
        self.count = 0

    def get_model(self, model_name, mode, save, **embedding_params):
        """Factory for the underlying WordEmbedding instance."""
        return WordEmbedding(model_name=model_name, mode=mode, save=save, **embedding_params)

    def get_label_embedding(self):
        """Build one embedding per label by max-pooling its segmented words' vectors."""
        labels = list(self.label_dict.keys())
        indices = list(self.label_dict.values())
        segmented = [jieba.lcut(label) for label in labels]
        self.label_cut_dict = dict(zip(indices, segmented))

        def pool_label(words):
            # Only in-vocabulary words contribute to the label vector.
            vocab = self.embedding.model.wv.vocab
            vectors = [self.embedding.model.wv[w] for w in words if vocab.get(w) is not None]
            return np.max(np.array(vectors), axis=0)

        return np.array([pool_label(words) for words in segmented])

    def attention(self, text_embedding):
        """Weight each word vector by cosine similarity to its closest label, then pool."""
        word_norms = np.linalg.norm(text_embedding, axis=1).reshape(-1, 1)
        label_norms = np.linalg.norm(self.label_embeddings, axis=1).reshape(1, -1)
        norm_products = np.dot(word_norms, label_norms)
        # Cosine similarity of every word against every label embedding.
        similarity = np.multiply(np.dot(text_embedding, self.label_embeddings.T), 1 / norm_products)
        # A word's weight is its best label match, normalised via softmax.
        weights = softmax(similarity.max(axis=1)).reshape(-1, 1)

        weighted = text_embedding * weights
        if self.method == 'mean':
            return np.mean(weighted, axis=0)
        return np.max(weighted, axis=0)

    def train(self, X):
        """Train the embedding on X and return one attention-pooled vector per text."""
        self.embedding.train(X)
        self.label_embeddings = self.get_label_embedding()
        embedded_texts = self.embedding.transform(X)
        return np.array([self.attention(text) for text in embedded_texts])