"""
@author: 石沙
@date: 2020-10-13
@content：本模块用于数据清洗
"""
from datasets import load_clothing_data
import jieba
from collections import OrderedDict
import re
import numpy as np
from site_packages.utils.job import DataOp
from site_packages.ml_libs.nlp.base import Dictionary
from site_packages.ml_libs.nlp.stopwords import Stopwords
import configs.settings as conf
from copy import deepcopy


# Mapping from age upper bound -> age-description label, used to replace a
# numeric age in the kb with a textual level. Keys must stay in ascending
# order: Preprocessor.compare_level returns the label of the FIRST bound the
# value is <= to, so the OrderedDict's insertion order is significant.
age_level = OrderedDict({
    3: '宝宝',
    9: '儿童',
    12: '小孩',
    18: '年轻',
    30: '青年',
    35: '中青年',
    45: '中年',
    60: '中老年',
    np.inf: '老年'
})


# Mapping from price upper bound -> price-description label, used to replace a
# numeric price in the kb with a textual level. Like age_level, keys must stay
# in ascending order because Preprocessor.compare_level takes the first bound
# the value fits under; np.inf is the catch-all last bucket.
price_level = OrderedDict({
    200: '低价',
    500: '正常',
    1000: '偏高',
    np.inf: '高价'
})


class Preprocessor:
    """Run a preset, ordered sequence of cleaning steps over a record DataFrame.

    Attr:
        df: pd.DataFrame with columns 'rec_id', 'title', 'kb', 'ocr', 'reference'
        is_training: bool; in training mode the 'reference' column is also
            processed (three extra steps). At serving time there is no
            reference input, so only the model-input columns are cleaned.
        non_chara_columns: names of columns whose special (non-word) characters
            are stripped by replace_non_character
        stopwords: stop-word cleaner
        processors: ordered list of cleaning-method names; run() executes them
            in exactly this order
    """
    def __init__(self, df, is_training=False):
        self.df = df
        self.is_training = is_training
        # 'reference' only exists (and only needs cleaning) during training
        self.non_chara_columns = ['ocr', 'reference'] if is_training else ['ocr']
        self.stopwords = Stopwords('symbol')
        self.stopwords.add([' ', '的'])
        self.processors = [
            'uniform_kb_age',
            'uniform_kb_price',
            'replace_non_character',
            'merge_kb',
            'lcut_ocr',
            'split_title',
            'merge_inputs',
            'clean_inputs_stopwords'
        ]
        if self.is_training:
            self.processors += ['lcut_ref', 'clean_ref_stopwords', 'merge_full_contents']

    def compare_level(self, item, dict_=None):
        """Map a numeric value to the label of the first bucket it fits in.

        :param item: numeric value to classify
        :param dict_: OrderedDict mapping ascending upper bounds -> labels
        :return: label of the first key with item <= key, or '' when no bound
            matches (cannot happen if the dict ends with np.inf)
        """
        for bound, label in dict_.items():
            if item <= bound:
                return label
        return ''

    def uniform_dict_value(self, kb, keyword=None, dict_=None, re_digit=re.compile(r'\d+')):
        """Replace the first kb entry whose key contains `keyword` by a level label.

        All digit runs are extracted from the entry's value; the largest one is
        mapped through `dict_` (see compare_level) and written back in place.

        :param kb: dict of attribute name -> attribute text; modified in place
        :param keyword: substring identifying the target attribute key
        :param dict_: OrderedDict of ascending upper bound -> level label
        :param re_digit: compiled regex extracting integer substrings
        :return: kb, with at most one value replaced by its level label
        """
        for key, value in kb.items():
            if keyword in key:
                nums = re_digit.findall(value)
                if nums:
                    # BUG FIX: max() on the digit *strings* compared
                    # lexicographically ('9' > '18'); compare numerically.
                    kb[key] = self.compare_level(max(map(float, nums)), dict_=dict_)
                    return kb
        return kb

    def uniform_kb_age(self):
        """Replace numeric ages in kb with age-level descriptions."""
        self.df['kb'] = self.df['kb'].apply(self.uniform_dict_value, keyword='年龄', dict_=age_level)

    def uniform_kb_price(self):
        """Replace numeric prices in kb with price-level descriptions."""
        # BUG FIX: this was a copy-paste duplicate of uniform_kb_age
        # (keyword='年龄', dict_=age_level), so prices were never normalized.
        # NOTE(review): '价格' assumed to be the kb price-key substring —
        # confirm against the actual kb schema.
        self.df['kb'] = self.df['kb'].apply(self.uniform_dict_value, keyword='价格', dict_=price_level)

    def replace_non_character(self):
        """Collapse runs of non-word characters into a single space."""
        for col in self.non_chara_columns:
            # Greedy \W+ replaces a whole run with ONE space; the previous
            # non-greedy \W+? matched each character separately, emitting one
            # space per stripped character. regex=True keeps this working on
            # pandas >= 2.0, where the default became literal replacement.
            self.df[col] = self.df[col].str.replace(r'\W+', ' ', regex=True)

    def merge_kb(self):
        """Tokenize every kb key and value and flatten them into one word list."""

        def merge(kb):
            word_list = []
            for key, value in kb.items():
                word_list.extend(jieba.lcut(key) + jieba.lcut(value))
            return word_list

        self.df['kb'] = self.df['kb'].apply(merge)

    def lcut_ocr(self):
        """Tokenize the 'ocr' column with jieba."""
        self.df['ocr'] = self.df['ocr'].apply(jieba.lcut)

    def lcut_ref(self):
        """Tokenize the 'reference' column with jieba (training mode only)."""
        self.df['reference'] = self.df['reference'].apply(jieba.lcut)

    def split_title(self):
        """Split the 'title' column on single spaces into word lists."""
        self.df['title'] = self.df['title'].str.split(' ')

    def merge_inputs(self):
        """Concatenate title, kb and ocr token lists into the 'inputs' column."""
        self.df['inputs'] = self.df['title'] + self.df['kb'] + self.df['ocr']

    def clean_inputs_stopwords(self):
        """Remove stop words from the 'inputs' token lists."""
        self.df['inputs'] = self.df['inputs'].apply(self.stopwords.clean)

    def clean_ref_stopwords(self):
        """Remove stop words from the 'reference' token lists (training only)."""
        self.df['reference'] = self.df['reference'].apply(self.stopwords.clean)

    def merge_full_contents(self):
        """Concatenate inputs and reference into 'full_contents' (training only)."""
        self.df['full_contents'] = self.df['inputs'] + self.df['reference']

    def run(self):
        """Execute every configured cleaning step in order.

        :return: cleaned DataFrame with columns ['rec_id', 'inputs',
            'reference', 'full_contents'] in training mode, otherwise
            ['rec_id', 'inputs']
        """
        for processor in self.processors:
            # getattr dispatch instead of exec(): safer, faster, debuggable
            getattr(self, processor)()
        if self.is_training:
            return self.df[['rec_id', 'inputs', 'reference', 'full_contents']]
        return self.df[['rec_id', 'inputs']]


if __name__ == '__main__':
    # One-off data cleaning (already run once; kept for reference):
    #   df = load_clothing_data()
    #   result = Preprocessor(df, is_training=True).run()
    #   DataOp.save(result, 'cleaned_data', is_model=False)

    # Build the base dictionary from the cleaned corpus.
    cleaned = DataOp.load_data('cleaned_data')
    corpus = cleaned['full_contents'].values.tolist()
    vocab = Dictionary(corpus, min_count=2, max_vocab_size=conf.MAX_VOCAB_SIZE)
    print('字典长度为:', len(vocab))
    DataOp.save(vocab, 'dictionary', is_model=False)

    # Build the expanded (untrimmed) dictionary for the pointer network.
    expanded_vocab = deepcopy(vocab)
    expanded_vocab.build(corpus, min_count=1, max_vocab_size=np.inf, to_trim=False)
    DataOp.save(expanded_vocab, 'expanded_dic', is_model=False)
    print(expanded_vocab.num_words)

    # Persist the in-article OOV words: present in the expanded vocabulary
    # but absent from the base one.
    oov_words = set(expanded_vocab.word2idx.keys()) - set(vocab.word2idx.keys())
    DataOp.save(oov_words, 'in_article_oovs', is_model=False)
