"""
author：石沙
date：2020-09-28
content：本模块来执行特征预处理
"""

# Extend sys.path first so the training task flow can resolve the imports below.
import sys
from settings import MAIN_PATH, SRC_PATH
sys.path.extend([MAIN_PATH, SRC_PATH])

import os
from settings import PROCESSED_DATA_PATH, NULLS
from datasets import load_book_info
from site_packages.ml_libs.nlpkits.stopwords import Stopwords
import re
import jieba
import numpy as np
import pickle
import pandas as pd


class BookInfoProcessor:
    """Feature-preprocessing pipeline for book records.

    Cleans a DataFrame with at least ``title`` and ``content`` columns by
    running a fixed sequence of steps (null handling, token normalization,
    jieba tokenizing, stop-word removal).  In ``train`` mode it additionally
    builds the label index, shuffles, and persists the result.
    """

    def __init__(self, inputs=None, mode='train'):
        """
        :param inputs: records posted by a client as a list of dicts, e.g.
                       [{'book_id': 1, 'title': '三体3', 'content': '程心是大boss'}];
                       used only when mode != 'train'.
        :param mode: 'train' loads the dataset from disk;
                     'answer' converts the posted records into a DataFrame.
        """
        self.mode = mode
        if self.mode == 'train':
            self.df = load_book_info()
        else:
            self.df = self.to_dataframe(inputs)
        self.stopwords = Stopwords()
        # Filled by label2index(): {secondClass value -> integer index}.
        self.label_mapping = None
        # Cleaning steps executed (in this order) in every mode.
        self.processors = [
            'drop_third_class',
            'rename_id',
            'drop_both_null',
            'fillna_both',
            'replace_year',
            'replace_month',
            'cut_by_jieba',
            'clean_by_stopwords',
            'filter_single_character',
            'join_both',
            'remove_number',
            'mark_text_size',
        ]
        # Extra steps executed only in 'train' mode.
        self.processors_train_only = [
            'trim_imcomplete_texts',  # name keeps the historical "imcomplete" typo
            'label2index',
            'shuffle',
            'save',
        ]

    def to_dataframe(self, inputs):
        """Convert a list of record dicts into a DataFrame.

        :raises ValueError: if inputs is empty (was a bare ``assert``, which
            is silently stripped when Python runs with ``-O``).
        :raises Exception: if inputs is not a list of dicts.
        """
        if not inputs:
            raise ValueError('inputs must be a non-empty list of dicts')
        if isinstance(inputs, list) and isinstance(inputs[0], dict):
            return pd.DataFrame(inputs)
        raise Exception('输入数据格式错误，应该为字典型')

    def drop_third_class(self):
        """Drop the unused third-level category column, if present."""
        if 'thirdClass' in self.df.columns:
            del self.df['thirdClass']

    def rename_id(self):
        """Rename the ``_id`` column to ``book_id``."""
        self.df.rename(columns={'_id': 'book_id'}, inplace=True)

    def drop_both_null(self):
        """Drop rows where both ``title`` and ``content`` are null."""
        both_null = self.df['title'].isnull() & self.df['content'].isnull()
        self.df = self.df[~both_null].copy()

    def fillna_both(self):
        """Fill null ``title``/``content`` with the project's null marker.

        Re-assigns the columns instead of calling ``fillna(..., inplace=True)``
        on a column view, which is deprecated chained assignment in recent
        pandas and a no-op under copy-on-write.
        """
        self.df['title'] = self.df['title'].fillna(NULLS['str'])
        self.df['content'] = self.df['content'].fillna(NULLS['str'])

    def replace_year(self):
        """Replace year mentions such as 【40年】 with the token 【年份】."""
        regex_year = re.compile(r'(\d+年)')
        # regex=True is required when passing a compiled pattern (pandas >= 2.0
        # raises ValueError otherwise).
        self.df['title'] = self.df['title'].str.replace(regex_year, '年份', regex=True)
        self.df['content'] = self.df['content'].str.replace(regex_year, '年份', regex=True)

    def replace_month(self):
        """Replace month mentions such as 【5月】 with the token 【月份】."""
        regex_month = re.compile(r'(\d+月)')
        # BUG FIX: months were previously replaced with '年份' (year) instead
        # of '月份' (month), merging the two categories of tokens.
        self.df['title'] = self.df['title'].str.replace(regex_month, '月份', regex=True)
        self.df['content'] = self.df['content'].str.replace(regex_month, '月份', regex=True)

    def cut_by_jieba(self):
        """Tokenize ``title`` and ``content`` with jieba."""
        self.df['title_list'] = self.df['title'].apply(jieba.lcut)
        self.df['content_list'] = self.df['content'].apply(jieba.lcut)

    def clean_by_stopwords(self):
        """Remove stop words from the token lists."""
        self.df['title_list'] = self.df['title_list'].apply(self.stopwords.clean)
        self.df['content_list'] = self.df['content_list'].apply(self.stopwords.clean)

    def filter_single_character(self):
        """Drop single-character tokens, which carry little information."""
        def _filter(tokens):
            return [w for w in tokens if len(w) > 1]

        self.df['title_list'] = self.df['title_list'].apply(_filter)
        self.df['content_list'] = self.df['content_list'].apply(_filter)

    def join_both(self):
        """Concatenate the title tokens and the content tokens per row."""
        self.df['full_content'] = [
            t + c for t, c in zip(self.df['title_list'], self.df['content_list'])
        ]

    def remove_number(self):
        """Strip digit runs from the tokens; tokens left empty are dropped."""
        regex_num = re.compile(r'(\d+)')  # hoisted: compile once, not per row

        def _remove(word_list):
            return regex_num.sub('', ' '.join(word_list)).split()

        self.df['full_content'] = self.df['full_content'].apply(_remove)

    def label2index(self):
        """Build and apply the secondClass -> integer-index mapping."""
        cates = self.df['secondClass'].unique()
        self.label_mapping = dict(zip(cates, np.arange(len(cates))))
        self.df['label'] = self.df['secondClass'].apply(lambda x: self.label_mapping[x])

    def mark_text_size(self):
        """Record the token count of the cleaned text."""
        self.df['text_size'] = self.df['full_content'].apply(len)

    def trim_imcomplete_texts(self):
        """Keep only rows with at least 2 tokens and a non-empty ``content``."""
        keep = (self.df['text_size'] >= 2) & (self.df['content'] != '')
        self.df = self.df[keep].copy()

    def shuffle(self):
        """Deterministically shuffle the rows (fixed random_state)."""
        self.df = self.df.sample(frac=1, random_state=1).reset_index(drop=True)

    def save(self):
        """Persist the cleaned DataFrame and the label mapping as pickles."""
        self.df.to_pickle(os.path.join(PROCESSED_DATA_PATH, 'book_clean.pkl'))
        with open(os.path.join(PROCESSED_DATA_PATH, 'label_dict.pkl'), 'wb') as f:
            pickle.dump(self.label_mapping, f)

    def run(self):
        """Execute the pipeline; in 'answer' mode, return the cleaned DataFrame."""
        for name in self.processors:
            print('当前执行：{}'.format(name))
            # getattr() instead of exec(): no string-built code evaluation.
            getattr(self, name)()
        if self.mode == 'answer':
            return self.df
        for name in self.processors_train_only:
            print('当前执行：{}'.format(name))
            getattr(self, name)()


if __name__ == '__main__':
    # Run the standard training-time preprocessing pipeline end to end.
    processor = BookInfoProcessor(mode='train')
    processor.run()

