"""
@author: 石沙
@date: 2020-12-06
@content：本模块用于采样和数据清洗
"""

import pandas as pd
import configs.settings as conf
from site_packages.utils.job import DataOp
import re
from collections import OrderedDict
import jieba.posseg as pseg
from site_packages.utils.dataframe import flatten_2d_list
import os
import jieba
from site_packages.ml_libs.nlp.stopwords import Stopwords


stopwords = Stopwords()


def extract(speech, speech_prev, tmp, file_type, chats):
    """Merge consecutive utterances of the same role within one session.

    :param speech: current record (tab-split fields)
    :param speech_prev: previous record (all '' on the first call)
    :param tmp: utterance being accumulated as [session_id, role, content]
    :param file_type: 'train' uses columns 0/2/-1, otherwise columns 2/3/4
    :param chats: list of finished merged utterances (mutated in place)
    :return: (tmp, chats) — the new accumulator and the chats list
    """
    # Column layout differs between the train file and the dev/test files.
    if file_type == 'train':
        session_id, role, content = speech[0], speech[2], speech[-1]
        prev_session, prev_role = speech_prev[0], speech_prev[2]
    else:
        session_id, role, content = speech[2], speech[3], speech[4]
        prev_session, prev_role = speech_prev[2], speech_prev[3]

    fresh = [session_id, role, content]
    if prev_session == '':
        # Very first record: start accumulating, nothing to flush yet.
        return fresh, chats
    if session_id != prev_session or role != prev_role:
        # Session or speaker changed: flush the pending utterance.
        chats.append(tmp)
        return fresh, chats
    # Same session and same speaker: concatenate onto the pending utterance.
    tmp[2] += conf.SEP + content
    return tmp, chats


def take_a_small_sample():
    """
    1. Copy all three data sets into the processed_data folder so later
       steps don't need to deal with different source paths.
    2. Keep only a 20% sample of the training chat log.
    """
    def _read_lines(path):
        with open(path, 'r', encoding='utf-8') as f:
            return f.readlines()

    train_lines = _read_lines(conf.DATA_PATH_INTENTION + '/chat.txt')
    print('总记录数：', len(train_lines))
    keep = int(len(train_lines) * 0.20)  # 20% sampling ratio
    DataOp.save(train_lines[:keep], 'train_sample', is_model=False)

    dev_lines = _read_lines(conf.DATA_PATH_INTENTION + '/开发集.txt')
    DataOp.save(dev_lines, 'dev_sample', is_model=False)

    test_lines = _read_lines(conf.DATA_PATH_INTENTION + '/测试集.txt')
    DataOp.save(test_lines, 'test_sample', is_model=False)


def restructure(file_type='train'):
    """
    Restructure and clean the raw data set.

    Converts the txt records into a DataFrame and merges consecutive
    utterances of the same role within the same session, in order:

    Original layout:
    session_id role content
         1      1     aaaa
         1      1     bbbb
         1      0     cccc
    Target layout:
    session_id role content
         1      1     aaaa[SEP]bbbb
         1      0     cccc

    :param file_type: train, dev, test
    :return: None (saves '{file_type}_df' via DataOp)
    """
    columns = ['session_id', 'role', 'content']

    lines = DataOp.load_data(f'{file_type}_sample')
    chats = []
    tmp = []
    length = 7 if file_type == 'train' else 5
    speech_prev = [''] * length
    for i, line in enumerate(lines):
        speech = re.split('\t', line.strip().replace(' ', ''))
        if file_type == 'train' and len(speech) != 7:
            continue  # skip malformed training rows
        tmp, chats = extract(speech, speech_prev, tmp, file_type, chats)
        speech_prev = speech
        if i % 1000 == 0:
            print('当前处理：第{}个'.format(i))

    # BUG FIX: extract() only flushes on a session/role change, so the last
    # accumulated utterance of the file was never appended and got dropped.
    if tmp:
        chats.append(tmp)

    df_sample = pd.DataFrame(chats, columns=columns)
    df_sample['content'] = df_sample['content'].apply(filter_content)
    # Role code 0 is the customer; everything else is the assistant side.
    df_sample['role'] = df_sample['role'].apply(lambda x: 'customer' if int(x) == 0 else 'assistance')
    DataOp.save(df_sample, '{}_df'.format(file_type), is_model=False)


def build_dataset(file_type='train'):
    """Build the train / dev / test data set: pair each customer turn with
    the following assistance turn and tokenize both sides."""
    data = DataOp.load_data('{}_df'.format(file_type))

    # Map each odd row index down onto the preceding even index so that a
    # (customer, assistance) pair shares one pivot key.
    data.reset_index(inplace=True)
    data['index'] = data['index'].apply(lambda idx: idx - idx % 2)
    data = data.pivot_table(index=['index', 'session_id'],
                            columns='role',
                            values='content',
                            aggfunc='first').reset_index()
    data = data[['session_id', 'customer', 'assistance']]
    data = data.dropna().reset_index(drop=True)
    # Tokenize each side with jieba, then strip stopwords.
    for side in ('customer', 'assistance'):
        tokenized = data[side].apply(jieba.lcut)
        data[side + '_speech_list'] = tokenized.apply(stopwords.clean)
    DataOp.save(data, '{}'.format(file_type), is_model=False)


def filter_content(sentence):
    """
    Remove or replace special placeholder fields, which include:
    1. #E-s[数字x] #E-2[数字x] and similar — emoji
    2. [ORDERID_10187709] — order id
    3. [数字x] — number
    4. https://item.jd.com/5898522.html — URL
    5. [地址x] — address
    6. [链接x] — link
    7. [金额x] — amount
    8. [日期x] — date
    9. [时间x] — time
    10. [站点x] — site
    11. [组织机构x] — organization
    12. [电话x] — phone number
    13. [姓名x] — person name
    Emoji are deleted outright; other fields are temporarily replaced with
    Greek letters and mapped back to their placeholders at the end.

    :param sentence: raw utterance text
    :return: cleaned sentence
    """
    # BUG FIX: the original emoji pattern was one triple-quoted multi-line
    # string, so the literal newlines and indentation became part of the
    # alternation and the words at each line start (电话, 可怜, 震惊, 尴尬, …)
    # could never match.  Adjacent string literals join with no whitespace.
    emoji_words = (
        '抱拳|傲慢|得意|蛋糕|呕吐|闭嘴|礼物|yaoping|柠檬|流泪|怒火|撇嘴|太阳|咒骂|糗|猪猪|足球|磕头|大兵|'
        '电话|灯泡|飞鸟|奋斗|高兴|击打|饥饿|咖啡|口罩|骷髅|可乐|疯狂|白眼|阴险|叹气|奸笑|发呆|害羞|飞吻|怒火|悲伤|胜利|生病|弱|'
        '可怜|咖啡|酷酷|眩晕|流泪|发抖|难过|右哼哼|惊恐|悲伤|犯困|愤怒|凋谢|哈欠|拥抱|抓狂|鄙视|时间|啤酒|勾引|左哼哼|月亮|偷笑|'
        '震惊|惊讶|跳跳|瞌睡|可爱|衰样|好|憨笑|水果|色色|黑线|微笑|流汗|握手|心碎|问号|大哭|亲亲|抠鼻|拜拜|鬼脸|香吻|米饭|花朵|'
        '尴尬|擦汗|安慰|委屈|调皮|爱心|我一定尽力为您解答的哦|很棒|鼓掌'
    )

    # Ordered: the Greek-letter placeholders are introduced first and mapped
    # back to [链接x]/[数字x] by the final two rules, so order matters.
    special_characters = OrderedDict({
        'emoji': (r'#E\-[\w]*(' + emoji_words + ')+', " "),
        'emoji_number': (r'(#E\-[\w]+\[数字x\])', ' '),
        'emoji2': (r'\[表情\]', ' '),
        'number_desc': (r'\[数字x\]', 'γ'),
        'sep': ('<sep>|<SEP>', conf.SEP),
        'link_desc': (r'\[链接x\]', "ε"),
        'link': (r"(http|ftp|https):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&amp;:/~\+#]*[\w\-\@?^=%&amp;/~\+#])?", "ε"),
        'link2': (r"(http|ftp|https):\/\/ε", "ε"),
        'auto_resp': ('【收到不支持的消息类型，暂无法显示】', ' '),
        'greek': (r"#E\-[s]*(ν|γ|π|ζ|ρ|α|ε)*", ' '),
        'order': (r"\[ORDERID_[\d]+]", "[订单x]"),
        'invalid_char': (r"[\s+\.\!\/_,$%^*(+\"\')]+|[+——()?【】“”！，。？、~@#￥%……&*（）]+", ' '),
        'as_link': ("ε", "[链接x]"),
        'as_number': ("γ", "[数字x]")
    })
    # Note: all backslash-containing patterns are raw strings now, which
    # silences the invalid-escape DeprecationWarnings without changing the
    # compiled patterns.
    for pattern, replacement in special_characters.values():
        sentence = re.sub(pattern, replacement, sentence)
    return sentence


def sku():
    """Build the SKU keyword set from ware.txt and save it."""
    ware_path = conf.DATA_PATH_INTENTION + '/ware.txt'
    sku_set = set()
    with open(ware_path, 'r', encoding='utf-8') as f:
        for raw in f:
            fields = raw.strip().split('\t')
            # The last column holds '/'-separated product keywords.
            sku_set.update(fields[-1].split('/'))
    DataOp.save(sku_set, 'sku_set', is_model=False)


def build_keyword():
    """Build the overall keyword set: business nouns extracted from the
    customer utterances of the training set, plus the SKU keyword set."""
    data = DataOp.load_data('train')
    sku_set = DataOp.load_data('sku_set')
    tokens = data['customer'].apply(
        lambda x: [token for token, pos in pseg.cut(x) if pos in ['n', 'vn', 'nz']]  # nouns, verbal nouns, proper nouns
    )
    # BUG FIX: the original `filter(lambda token: len(token) >= 1, tokens)`
    # was applied to the per-sentence token *lists* (only dropping empty
    # lists — a no-op after flattening), not to the tokens themselves.
    # Filter the individual tokens after flattening, as intended.
    keywords = {token for token in flatten_2d_list(tokens) if len(token) >= 1}
    keywords |= sku_set
    print('关键词长度为：', len(keywords))
    DataOp.save(keywords, 'keywords', is_model=False)


def build_model_data(file_type='train'):
    """Build the fastText-style data set used directly for model training.

    Each output line is '__label__{0|1}\\t' followed by the space-joined
    customer tokens, where label 1 means the utterance contains at least
    one business keyword.

    :param file_type: train, dev, test
    :return: None (writes '{file_type}.csv' under PROCESSED_DATA_PATH)
    """
    keywords = DataOp.load_data('keywords')
    df = DataOp.load_data(file_type)
    # BUG FIX: the original used set(customer_string) — i.e. a set of single
    # *characters* — intersected with multi-character keywords, so the label
    # was almost always 0.  Intersect the tokenized utterance instead.
    # NOTE(review): customer_speech_list is stopword-cleaned while keywords
    # were extracted from raw text — confirm stopwords never carry keywords.
    df['is_business'] = df['customer_speech_list'].apply(
        lambda tokens: int(len(set(tokens) & keywords) >= 1))
    df['is_business'] = df['is_business'].apply(lambda x: '__label__' + str(x) + '\t')
    df['customer_speech_list'] = df['customer_speech_list'].apply(lambda x: ' '.join(x))
    with open(os.path.join(conf.PROCESSED_DATA_PATH, f'{file_type}.csv'), 'w', encoding='utf-8') as f:
        full_content = (df['is_business'] + df['customer_speech_list'] + '\n').tolist()
        f.writelines(full_content)


if __name__ == '__main__':
    # Step 1: sample the raw data.
    take_a_small_sample()

    # Step 2: restructure, clean and tokenize each split.
    for split in ('train', 'dev', 'test'):
        restructure(split)
        build_dataset(split)

    # Step 3: build the SKU and overall keyword sets.
    sku()
    build_keyword()

    # Step 4: build the data sets consumed directly by model training.
    for split in ('train', 'dev', 'test'):
        build_model_data(split)
