"""
author：石沙
date：2020-09-28
content：本模块来执行特征提取
"""

# The path setup below ensures the training task flow can resolve project imports
import sys
from settings import MAIN_PATH, SRC_PATH
sys.path.extend([MAIN_PATH, SRC_PATH])


from datasets import *
from embedding import WordEmbeddingDoc, UncommonWordsFilter, WordEmbedding, NgramEmbedding, AttentionEmbedding
import pandas as pd
import numpy as np
from site_packages.utils.dataframe import ListMerger
from site_packages.utils.decorators import FuncDelay
from collections import OrderedDict


# Hyper-parameters shared by every embedding model in this module
# (expanded via **EMBEDDING_PARAMS; presumably gensim-style word2vec
# settings — TODO confirm against the embedding module).
EMBEDDING_PARAMS = {
    'min_count': 2,  # ignore words occurring fewer than 2 times
    'size': 200,     # dimensionality of the embedding vectors
    'workers': 4     # number of parallel training workers
}


def word2vec_embedding(corpus=None):
    """Train (and persist) the word2vec word-embedding model.

    :param corpus: DataFrame with a 'full_content' column of tokenized
        documents; loaded via load_book_clean() when omitted.
    """
    # Default to the cleaned book corpus when the caller supplies nothing.
    corpus = load_book_clean() if corpus is None else corpus

    # Train the model; save=True persists it for the downstream extractors.
    w2v_model = WordEmbedding(model_name='word2vec', save=True, mode='train', **EMBEDDING_PARAMS)
    w2v_model.train(corpus['full_content'].tolist())


@FuncDelay
def word_embedding_doc(X, book_ids=None, method='mean', file_name=None, return_df=False):
    """Build per-document vectors by aggregating word2vec word embeddings.

    :param X: list of tokenized documents (list of list of str)
    :param book_ids: document ids aligned with X, saved/merged with the vectors
    :param method: aggregation over word vectors (e.g. 'mean' or 'max')
    :param file_name: base name of the persisted .pkl; required when return_df is False
    :param return_df: True -> return a feature DataFrame; False -> save to disk
    :return: feature DataFrame when return_df is True, otherwise None
    :raises ValueError: if file_name is missing while return_df is False
    """
    # Explicit raise instead of `assert`: asserts are stripped under `python -O`,
    # which would silently allow saving with file_name=None.
    if not return_df and file_name is None:
        raise ValueError('file_name is required when return_df is False')

    # Corpus transformers applied before embedding lookup: drop words rarer
    # than the embedding model's min_count (they have no trained vector).
    embedding_transformers = [
        UncommonWordsFilter(EMBEDDING_PARAMS['min_count'])
    ]

    # Document-vector model backed by the trained word2vec embeddings.
    doc_model = WordEmbeddingDoc(model_name='word2vec',
                                 method=method,
                                 transformers=embedding_transformers,
                                 **EMBEDDING_PARAMS)

    # Compute the document vectors.
    doc_vecs = doc_model.train(X)

    # Either return the vectors as a DataFrame or persist them to disk.
    if return_df:
        return FeatureContainer.make_df(book_ids, doc_vecs, file_name)
    FeatureContainer.save(book_ids, doc_vecs, file_name)


@FuncDelay
def ngram_embedding(X, book_ids=None, ngram=2, method='mean', mode='train', detect=True, file_name=None,
                    return_df=False):
    """Build per-document vectors from n-gram embeddings.

    :param X: list of tokenized documents (list of list of str)
    :param book_ids: document ids aligned with X, saved/merged with the vectors
    :param ngram: n-gram order (2, 3, 4, ...)
    :param method: aggregation over n-gram vectors (e.g. 'mean' or 'max')
    :param mode: 'train' to fit the n-gram model (passed through to NgramEmbedding)
    :param detect: passed through to NgramEmbedding (n-gram detection switch)
    :param file_name: base name of the persisted .pkl; required when return_df is False
    :param return_df: True -> return a feature DataFrame; False -> save to disk
    :return: feature DataFrame when return_df is True, otherwise None
    :raises ValueError: if file_name is missing while return_df is False
    """
    # Explicit raise instead of `assert`: asserts are stripped under `python -O`.
    if not return_df and file_name is None:
        raise ValueError('file_name is required when return_df is False')

    # N-gram document-vector model.
    doc_model = NgramEmbedding(ngram=ngram, method=method, mode=mode, detect=detect, **EMBEDDING_PARAMS)

    # Compute the document vectors.
    doc_vecs = doc_model.train(X)

    # Either return the vectors as a DataFrame or persist them to disk.
    if return_df:
        return FeatureContainer.make_df(book_ids, doc_vecs, file_name)
    FeatureContainer.save(book_ids, doc_vecs, file_name)


@FuncDelay
def attention_embedding(X, label_dict, book_ids=None, method='mean', file_name=None, return_df=False):
    """Build per-document vectors using label-attention-weighted embeddings.

    :param X: list of tokenized documents (list of list of str)
    :param label_dict: mapping of class label -> class index (attention targets)
    :param book_ids: document ids aligned with X, saved/merged with the vectors
    :param method: aggregation over word vectors (e.g. 'mean' or 'max')
    :param file_name: base name of the persisted .pkl; required when return_df is False
    :param return_df: True -> return a feature DataFrame; False -> save to disk
    :return: feature DataFrame when return_df is True, otherwise None
    :raises ValueError: if file_name is missing while return_df is False
    """
    # Explicit raise instead of `assert`: asserts are stripped under `python -O`.
    if not return_df and file_name is None:
        raise ValueError('file_name is required when return_df is False')

    # Attention-weighted document-vector model.
    doc_model = AttentionEmbedding(label_dict, method=method, **EMBEDDING_PARAMS)

    # Compute the document vectors.
    doc_vecs = doc_model.train(X)

    # Either return the vectors as a DataFrame or persist them to disk.
    if return_df:
        return FeatureContainer.make_df(book_ids, doc_vecs, file_name)
    FeatureContainer.save(book_ids, doc_vecs, file_name)


def ext_features(X, label_dict, book_ids=None, return_df=False):
    """Run every configured feature extractor over the corpus.

    :param X: list of token lists, one per document
    :param label_dict: mapping of class label -> class index
    :param book_ids: document identifiers (list or np.ndarray); sequential
        ids are generated when omitted
    :param return_df: True -> return one DataFrame with all features merged
        on 'book_id'; False -> persist each feature set as <name>.pkl
    :return: merged feature DataFrame when return_df is True, otherwise None
    """
    # Fall back to sequential ids when the caller supplied none.
    if book_ids is None:
        book_ids = pd.DataFrame(np.arange(len(X)).reshape(-1, 1), columns=['book_id'])

    # Keyword arguments shared by every extractor invocation.
    shared_params = {
        'book_ids': book_ids,
        'return_df': return_df
    }

    # Delayed extractor specs: each call is wrapped by FuncDelay into a
    # func/args/kwargs record and fired later in the loop below.
    feature_extractors = OrderedDict([
        ('embedding_doc_mean', word_embedding_doc(X, method='mean')),
        ('embedding_doc_max', word_embedding_doc(X, method='max')),
        ('ngram_embedding_2_mean', ngram_embedding(X, ngram=2, method='mean', detect=True)),
        ('ngram_embedding_2_max', ngram_embedding(X, ngram=2, method='max', detect=True)),
        ('ngram_embedding_3_mean', ngram_embedding(X, ngram=3, method='mean', detect=True)),
        ('ngram_embedding_3_max', ngram_embedding(X, ngram=3, method='max', detect=True)),
        ('ngram_embedding_4_mean', ngram_embedding(X, ngram=4, method='mean', detect=True)),
        ('ngram_embedding_4_max', ngram_embedding(X, ngram=4, method='max', detect=True)),
        ('attention_embedding_mean', attention_embedding(X, label_dict, method='mean')),
        ('attention_embedding_max', attention_embedding(X, label_dict, method='max'))
    ])

    # Fire each delayed extractor; the spec name doubles as the output file name.
    frames = []
    for feat_name, spec in feature_extractors.items():
        print(f'---------当前抽取：{feat_name}---------')
        spec['kwargs']['file_name'] = feat_name
        spec['kwargs'].update(shared_params)
        result = spec['func'](*spec['args'], **spec['kwargs'])
        if return_df:
            frames.append(result)

    # Merge every per-feature DataFrame against the id column when requested.
    if return_df:
        return ListMerger([book_ids] + frames, 'book_id').run()


def main():
    """Entry point: train word2vec, then extract and persist all features."""
    data = load_book_undersample_1k()
    label_dict = load_label_dict()
    X = data['full_content'].tolist()
    # NOTE: the original also computed `y = data['label'].values`, but the
    # labels are never used here — removed as dead code.
    book_ids = data[['book_id']].values

    # Train the word2vec model first; the extractors below depend on it.
    word2vec_embedding()

    # Run the full feature-extraction pipeline, saving each feature set to disk.
    ext_features(X, label_dict, book_ids=book_ids, return_df=False)


# Run the pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()
