# -*- coding: utf-8 -*-   
# @File   : predict_wjf.py         
# @Author : wjf
# @Time   : 2023/12/26 10:51
import os

import pandas as pd
import torch
from multiprocessing import Process, Queue

from loguru import logger

import model.config as conf
from transformers import BertTokenizer
from model.bigbird import BigBird
from sklearn.metrics import classification_report
import re
import concurrent.futures

# Keyword tables consumed by the rule-based risk screens below.
foul_language = ['你妈嘞比', '妈嘞比', '妈嘞逼', '妈了个逼', '妈那个逼', '妈那逼', '妈那个臭逼', '恁妈那逼', '恁妈了个', '麻痹', '靠你妈', '日你妈', '他妈逼',
                 '操你妈', '妈了逼', '妈逼', '草你妈', '草泥马', '我靠你爸', 'SB', '傻逼', '傻比', '去你妈', '日你姐', '骂谁', '去你娘逼', '去球']  # profanity list
red_keyword = ['投诉', '举报', '九五九八', '九五五九八', '95598', '9598', '一二三四五', '幺二三四五', '12345',
               '一二三九八', '12398', '态度', '服务差', '打架', '诈骗', '九九八',
               '现场已报警', '缺德', '断子绝孙']  # red-alert sensitive words
orange_keyword = ['省里打', '骚扰', '往上面反应', '往上面反映', '往上级反映', '往上级反应', '给上级反映', '给领导说说', '看领导咋说', '曝光', '抖音',
                  '西瓜视频', '微博', '放网上', '快手', '记者', '火山小视频', '咋解决', '算账', '解决不了', '着火', '用电性质', '偷电', "没啥意义", "没意义", "别说了", '权利', '权力', "管不住", "管不着", "管不了"]  # orange-alert sensitive words
yellow_keyword = ['欠费停电', '一直停电', '多次停电', '电压低', '死人了', '电死人', '赔偿', '损失', '停电三四次',
                  '停电好几回', '老停电', '总停电', '老是停电', '总是停电', '经常停电', '停电四五次', '电压低',
                  '低电压', '拉不起', '带不动', '不叫修', '不维修', '装孬', '没法儿管', '没法管']  # yellow-alert sensitive words
# Fixed: a missing comma after '老出问题' used to fuse it with '经常出问题'
# into one bogus entry via implicit string concatenation.
blue_keyword = ['电住人', '电着人', '电到人', '电住小孩', '频繁', '好几次', '好几回', '再跳', '老跳', '老出问题',
                '经常出问题', '点住人', '垫住人', '点住小孩']  # blue-alert sensitive words


class ComplaintWarnPredict(object):
    """
    Worker that drains a queue of call transcripts and runs every risk screen
    independently (super white/black lists, common keyword lists, BigBird
    model, start/end dialog rules), pushing each screen's raw output to an
    output queue for offline analysis.

    :param que_in: multiprocessing.Queue of (index, transcript) tuples
    :param que_out: multiprocessing.Queue receiving per-call result tuples
    :param model: torch classifier used by predict_by_model
    :param tokenizer: BERT tokenizer used by predict_by_model
    :param device: torch device string ('cuda' or 'cpu')
    :param threshold: sigmoid probability threshold for the model screen
    """

    def __init__(self, que_in, que_out, model, tokenizer, device, threshold=0.1):
        self.model = model
        self.tokenizer = tokenizer
        self.device = device
        self.threshold = threshold
        self.que_in = que_in
        self.que_out = que_out

        # Each instance owns one worker process running debug_worker_v3.
        self.proc_debug = Process(target=self.debug_worker_v3)

        # Current work item (filled by get_data).
        self.index = -1
        self.content = None

        self.is_finished = False

        # Result fields; only meaningful after a worker pass.
        self.is_warn = -1
        self.super_black_words = []
        self.black_words_low = []
        self.model_score = -1.0
        self.debug_result = []

    def start_debug(self):
        """Start the worker process."""
        self.proc_debug.start()
        logger.info("进程{}启动......".format(self.proc_debug.pid))

    def stop_debug(self):
        """Terminate the worker process (if any) and mark this worker finished."""
        # NOTE(review): when invoked from inside the worker (see get_data),
        # this terminates the child's *copy* of the Process handle — confirm
        # that is the intended shutdown path.
        if self.proc_debug is not None:
            self.proc_debug.terminate()
        self.is_finished = True
        logger.info("进程{}结束".format(self.proc_debug.pid))

    def get_data(self):
        """Pop the next (index, transcript) pair, or shut down when the queue is empty."""
        # NOTE(review): Queue.empty() is not reliable across processes; a race
        # here can leave self.index/self.content unchanged from the previous
        # item — confirm this is acceptable for the debug run.
        if not self.que_in.empty():
            self.index, self.content = self.que_in.get()
        else:
            self.stop_debug()
            logger.info("进程{}结束".format(self.proc_debug.pid))

    def release_worker_v3(self, ):
        """
        Run every screen once on self.content and return the raw outputs.

        :return: (super_white_flag, super_black_flag, super_black_words,
                  common_black_flag, common_black_words, model_flag, score_model)
        """
        logger.info("进程{}收到数据{}".format(self.proc_debug.pid, self.content))
        # Rule screens operate on the comma-stripped transcript.
        content_rule = self.content.replace(",", "").replace("，", "")
        logger.info("进程{}处理规则数据{}".format(self.proc_debug.pid, content_rule))
        # 1 - super white/black list screen
        super_white_flag, super_black_flag, super_black_words = predict_by_super_keyword(content_rule)
        logger.info("进程{}超级名单结果:白名单{} 黑名单{} 黑名单{}".format(self.proc_debug.pid, super_white_flag, super_black_flag, super_black_words))

        # 2 - common black keyword screen over every alert list
        common_black_flag, common_black_words = predict_by_keyword(content_rule, foul_language + red_keyword + orange_keyword + yellow_keyword + blue_keyword)
        logger.info("进程{}普通名单结果:黑名单{} 黑名单{}".format(self.proc_debug.pid, common_black_flag, common_black_words))

        # 3 - model screen: strip each line's 3-char speaker prefix, join with '。'
        content_model = '。'.join([context[3:] for context in self.content.split('\n')])
        logger.info("进程{}处理模型数据{}".format(self.proc_debug.pid, content_model))
        model_flag, score_model = predict_by_model(content_model, self.model, self.tokenizer, self.device, self.threshold)
        logger.info("进程{}模型预测结果:结果{} 分数{}".format(self.proc_debug.pid, model_flag, score_model))

        return super_white_flag, super_black_flag, super_black_words, common_black_flag, common_black_words, model_flag, score_model

    def debug_worker_v3(self):
        """
        Debug loop: run every rule independently on each queued transcript and
        push all raw results (no combination logic) to the output queue.
        """
        while not self.que_in.empty():
            self.get_data()

            content_rule = self.content.replace(",", "").replace("，", "")
            # 1 - super white/black list screen
            super_white_flag, super_black_flag, super_black_words = predict_by_super_keyword(content_rule)
            logger.info("进程{}超级名单结果:白名单{} 黑名单{} 黑名单{}".format(self.proc_debug.pid, super_white_flag, super_black_flag, super_black_words))

            # 2 - common black keyword screen
            common_black_flag, common_black_words = predict_by_keyword(content_rule, foul_language + red_keyword + orange_keyword + yellow_keyword + blue_keyword)
            logger.info("进程{}普通名单结果:黑名单{} 黑名单{}".format(self.proc_debug.pid, common_black_flag, common_black_words))
            # 3 - model screen
            content_model = '。'.join([context[3:] for context in self.content.split('\n')])
            model_flag, score_model = predict_by_model(content_model, self.model, self.tokenizer, self.device, self.threshold)
            logger.info("进程{}模型预测结果:结果{} 分数{}".format(self.proc_debug.pid, model_flag, score_model))

            # 4 - start/end dialog rule screen
            start_white_flag, start_white_words, end_black_flag, end_black_words, end_white_flag, end_white_words = predict_by_start_end_rule(
                content_rule.replace(",", "").replace("，", ""))
            logger.info(
                "进程{}首尾名单结果:开头白名单结果{} 开头白名单{} 结束黑名单结果{} 结束黑名单{} 结束白名单结果{} 结束白名单{}".format(self.proc_debug.pid,
                                                                                          start_white_flag, start_white_words,
                                                                                          end_black_flag, end_black_words,
                                                                                          end_white_flag, end_white_words))
            # Fixed: the original called logger.info() with no message, which
            # raises TypeError in loguru and killed the worker on its first item.
            self.que_out.put(
                (
                    self.index, super_white_flag, super_black_flag, super_black_words,
                    common_black_flag, common_black_words,
                    model_flag, score_model,
                    start_white_flag, start_white_words, end_black_flag, end_black_words, end_white_flag, end_white_words))

        logger.info("进程{}结束".format(self.proc_debug.pid))


def change_label(data_df):
    """Map a row's raw ``label`` value through the binary label table ``cls``."""
    # NOTE(review): ``cls`` is the module-level dict assigned in the __main__
    # section — this helper only works after that assignment has run.
    return cls[data_df.label]


def dict_label(data_df):
    """Map a row's ``label_new`` value through the ``conf.cls`` label table."""
    return conf.cls[data_df.label_new]


def predict_by_keyword(x, keyword, client_flag=False, reverse=('回访',)):
    """
    Decide whether transcript *x* triggers an explicit-risk sensitive word.

    :param x: call transcript, one utterance per line; may be NaN from pandas
    :param keyword: sensitive words to search for
    :param client_flag: if True, only scan customer lines (prefix "客户") and
                        discard all hits when any line mentions "回访"
    :param reverse: veto words — a keyword hit in a span that also contains a
                    veto word is ignored (immutable default fixes the original
                    mutable-default-argument smell; behavior unchanged)
    :return: (1, unique_hit_words) when any keyword matched, else (0, [])
    """
    # Missing transcripts arrive as float('nan'); str() of either a real NaN
    # or the literal string 'nan' is 'nan'.
    if str(x) == 'nan':
        return 0, []

    reason = []
    sentence_list = x.split('\n')
    if client_flag:
        for sentence in sentence_list:
            # A follow-up ("回访") call is never a complaint: drop hits, stop.
            if '回访' in sentence:
                reason = []
                break
            if sentence[:2] == "客户":
                for word in keyword:
                    if word in sentence and not any(i in sentence for i in reverse):
                        reason.append(word)
    else:
        # Scan the whole transcript as one string.
        sentences = '。'.join(sentence_list)
        for word in keyword:
            if word in sentences and not any(i in sentences for i in reverse):
                reason.append(word)

    result = 1 if reason else 0
    return result, list(set(reason))


def predict_by_start_end_rule(content, len_start=5, len_end=10):
    """
    Score call risk from the opening *len_start* and closing *len_end* lines.

    :param content: full transcript, one utterance per line
    :param len_start: number of opening lines to inspect
    :param len_end: number of closing lines to inspect
    :return: (begin_white_res, begin_white_words,
              end_black_res, end_black_words,
              end_white_res, end_white_words) — each (flag, hit_words) pair
              comes from predict_by_keyword on the corresponding segment
    """
    begin_white = ['指挥中心', '移动', '联通', '电信', '供服', '指挥员', '主动抢修', '吃饭没', '耽误', '打扰', '弄啥嘞']
    end_white = ['行', '好', '中', '再见', '谢谢', "谢啦", "谢啊", "谢了", "谢你", '明白了', '拜拜', '辛苦', '好嘞', '那就这', '就这啊', '处理一下', '我知道', '知道了',
                 '哈哈', '没事', '挂了啊', '那你忙', '你忙吧', '不好意思', '麻烦了', '麻烦你了', '麻烦你啊', '打扰了', '打扰你了', '打错了', '感谢', '可以']
    end_black = ['就这吧', '赶紧', '赶快', '不满意']

    reverse = ['不好', '好不', '行不', '不行', '中不', '不中', '可以不', '不可以', '满意不', '辛苦不', '不辛苦', '打扰不', '不打扰']

    texts = str(content).split("\n")

    if len(texts) >= len_end:
        begin_text = '\n'.join(texts[:len_start])
        ends_text = '\n'.join(texts[-len_end:])
    else:
        # Too short to split: inspect the whole transcript at both ends.
        begin_text = content
        ends_text = content

    # Opening white words: caller is a friend/internal number -> no-risk signal.
    begin_white_res, begin_white_words = predict_by_keyword(begin_text, begin_white, client_flag=True, reverse=['打扰不', '不打扰'])

    # Closing black words: customer impatience/irritation -> risk signal.
    end_black_res, end_black_words = predict_by_keyword(ends_text, end_black, client_flag=True, reverse=['满意不'])

    # Closing white words: positive customer feedback -> no-risk signal.
    # Fixed: the original assigned these hits to end_black_words, clobbering
    # the black-list result and returning end_black_words twice.
    end_white_res, end_white_words = predict_by_keyword(ends_text, end_white, client_flag=True, reverse=reverse)

    return begin_white_res, begin_white_words, end_black_res, end_black_words, end_white_res, end_white_words


def predict_by_super_keyword(context):
    """
    Screen a transcript against the "super" white/black phrase lists.

    :param context: full call transcript (newline-separated utterances)
    :return: (super_white_flag, super_black_flag, matchs) where matchs is
             [closing-segment white hits, whole-text white hits,
              distinct whole-text black hits]
    """
    closing_white = ["感谢", "谢谢", "谢啦", "谢啊", "谢了", "谢你", "拜拜", "辛苦了", '麻烦了', '麻烦你了']
    anywhere_white = ['不打算投诉', '不想投诉', '不去投诉', "没有投诉", "没投诉",
                      "没举报", "没有举报", "不会投诉", "不会举报",
                      '不打95598', '没打95598']
    anywhere_black = ["没啥意义", "没意义", "别说了", "权利", "管不住", "管不着", "管不了"]

    lines = str(context).split("\n")
    # Closing segment: the final 10 utterances (whole text when shorter).
    tail = '.'.join(lines[-10:]) if len(lines) >= 10 else context

    tail_white_hits = re.compile('|'.join(closing_white)).findall(tail)
    all_white_hits = re.compile('|'.join(anywhere_white)).findall(context)
    black_hits = set(re.compile('|'.join(anywhere_black)).findall(context))

    # Any white hit (closing or anywhere) sets the white flag; the black flag
    # needs at least two *distinct* black phrases.
    super_white_flag = bool(tail_white_hits) or bool(all_white_hits)
    super_black_flag = len(black_hits) >= 2

    matchs = [tail_white_hits, all_white_hits, list(black_hits)]
    return super_white_flag, super_black_flag, matchs


def predict_by_model(content, model, tokenizer, device, threshold):
    """
    Run the BigBird classifier on one transcript.

    :param content: '。'-joined transcript with speaker prefixes removed
    :param model: classifier returning a single logit
    :param tokenizer: BERT tokenizer (provides [CLS]/[SEP]/[PAD] tokens)
    :param device: torch device to run on
    :param threshold: sigmoid probability at/above which the call is flagged
    :return: (model_flag, score_model) — 0/1 flag and sigmoid probability
    """
    token = tokenizer.tokenize(content)
    # Treat sentence boundaries as [SEP] tokens.
    token = ['[SEP]' if t == '。' else t for t in token]
    # Reserve two slots for the [CLS]/[SEP] wrappers added below.
    if len(token) > conf.max_length - 2:
        token = token[:conf.max_length - 2]
    token = ['[CLS]'] + token + ['[SEP]']
    # Fixed: pad with the BERT special token '[PAD]'; the bare string 'PAD' is
    # not in the vocab and was converted to [UNK] ids. Those positions are
    # masked out by atten_mask, but the ids were still wrong.
    token_pad = token + ['[PAD]'] * (conf.max_length - len(token))
    token_id = tokenizer.convert_tokens_to_ids(token_pad)
    atten_mask = [1] * len(token) + [0] * (conf.max_length - len(token))
    token_id, atten_mask = torch.tensor(token_id, dtype=torch.long), torch.tensor(atten_mask, dtype=torch.long)

    with torch.no_grad():
        # Renamed from `input` to avoid shadowing the builtin.
        inputs = token_id.to(device).unsqueeze(dim=0)
        atten_mask = atten_mask.to(device).unsqueeze(dim=0)
        pred = model(inputs, atten_mask)

        # Single-logit BCE head: sigmoid -> probability, then threshold.
        pred = torch.sigmoid(pred)
        pred1 = (pred >= threshold).int()
        model_flag = pred1.item()
        score_model = pred.item()
    return model_flag, score_model


def evaluate(row, tokenizer, model, device, type="v3"):
    """
    Combined rule + model evaluation for one dataframe row.

    :param row: row with ``indexs``, ``context`` (transcript) and ``label_new``
    :param tokenizer: BERT tokenizer
    :param model: classifier returning a single logit
    :param device: torch device
    :param type: pipeline variant; "v2"/"v3" enable the extra rule stages
    :return: (return_result, return_score, matchs, keywords)
    """
    super_white_flag, super_black_flag, matchs, keywords, return_result, return_score = 0, 0, [], [], 0, 0
    print("当前行：", row.indexs)
    content = str(row.context)
    # Too few utterances: skip entirely.
    if len(content.split('\n')) < 5:
        return 0, 0, ["行太少"], keywords
    # 1 - common black keyword screen
    result, keywords = predict_by_keyword(content, foul_language + red_keyword + orange_keyword + yellow_keyword + blue_keyword)
    # 2 - super white/black list screen (only when a black keyword fired)
    if result and (type == "v2" or type == "v3"):
        super_white_flag, super_black_flag, matchs = predict_by_super_keyword(str(row.context).replace(",", "").replace("，", ""))
        if super_black_flag:
            return_result = 1
        if super_white_flag and not super_black_flag:
            return_result = 0
        return return_result, return_score, matchs, keywords

    # 3 - model screen on the prefix-stripped transcript
    content = '。'.join([context[3:] for context in content.split('\n')])
    # label tensor below is currently unused; the conf.cls lookup is kept so
    # an unknown label still raises as before.
    label = int(conf.cls[row.label_new])

    token = tokenizer.tokenize(content)
    token = ['[SEP]' if t == '。' else t for t in token]
    if len(token) > conf.max_length - 2:
        token = token[:conf.max_length - 2]
    token = ['[CLS]'] + token + ['[SEP]']
    # Fixed: pad with '[PAD]' (the bare string 'PAD' maps to [UNK]); the
    # padded positions are masked out by atten_mask either way.
    token_pad = token + ['[PAD]'] * (conf.max_length - len(token))
    token_id = tokenizer.convert_tokens_to_ids(token_pad)
    atten_mask = [1] * len(token) + [0] * (conf.max_length - len(token))
    token_id, atten_mask, label = torch.tensor(token_id, dtype=torch.long), torch.tensor(atten_mask, dtype=torch.long), \
                                  torch.tensor(label, dtype=torch.long)

    with torch.no_grad():
        inputs = token_id.to(device).unsqueeze(dim=0)
        atten_mask = atten_mask.to(device).unsqueeze(dim=0)
        pred = model(inputs, atten_mask)

        # Single-logit BCE head: sigmoid -> probability, then threshold.
        pred = torch.sigmoid(pred)
        threshold = 0.1
        pred1 = (pred >= threshold).int()
        return_result = pred1.item()
        return_score = pred.item()
        if type == "v2" and not result:
            return_result = 0
        if type == "v3" and not result:
            # Score below 0.5 is always treated as no-risk.
            if pred.item() < 0.5:
                return_result = 0
            else:
                # Fixed: predict_by_start_end_rule returns six values; the
                # original two-value unpack raised ValueError on this path.
                # Priority mirrors the commented-out original rule: a
                # whitelisted opening or closing clears the risk, an impatient
                # closing confirms it, otherwise the model's positive verdict
                # stands. NOTE(review): confirm the else-branch default.
                (begin_w_res, begin_w_words, end_b_res, end_b_words,
                 end_w_res, end_w_words) = predict_by_start_end_rule(str(row.context).replace(",", "").replace("，", ""))
                if begin_w_res:
                    return_result, matchs = 0, begin_w_words
                elif end_b_res:
                    return_result, matchs = 1, end_b_words
                elif end_w_res:
                    return_result, matchs = 0, end_w_words
                else:
                    return_result = 1
        return return_result, return_score, matchs, keywords


def evaluate_wrapper(x):
    """Fan-out adapter: run ``evaluate`` on the row carried in x[1] and wrap
    its (result, score, matchs, keywords) tuple in a pandas Series."""
    # NOTE(review): ``tokenizer``, ``bigbird_model``, ``device`` and ``type``
    # are resolved as module globals set up in the __main__ section.
    outcome = evaluate(x[1], tokenizer, bigbird_model, device, type)
    return pd.Series(list(outcome))


def metrics_report(df):
    """
    Build a per-class precision/recall table comparing df['gt'] to df['predict'].

    :param df: dataframe with 'gt' (ground truth) and 'predict' columns whose
               values come from conf.cls
    :return: dataframe indexed by class name; percent strings plus raw counts
    """
    # Removed: the original also computed sklearn's classification_report into
    # a local that was never used.
    result_df = pd.DataFrame(
        columns=['精确率-百分比', '精确率-真实数量', '精确率-预测数量', '召回率-百分比', '召回率-预测数量',
                 '召回率-真实数量'])

    # One row of counts and ratios per class.
    for category in conf.cls.keys():
        true_count = df[df['gt'] == conf.cls[category]].shape[0]
        pred_count = df[df['predict'] == conf.cls[category]].shape[0]
        pre_true_count = df[(df['predict'] == conf.cls[category]) & (df['gt'] == conf.cls[category])].shape[0]
        if pre_true_count == 0:
            # No true positives: both ratios are zero; this also guards the
            # divisions below when pred_count/true_count are 0.
            # NOTE(review): yields int 0 here vs. "x%" strings below — confirm
            # downstream consumers tolerate the mixed types.
            precision = recall = 0
        else:
            precision = str(round((pre_true_count / pred_count) * 100, 2)) + "%"
            recall = str(round((pre_true_count / true_count) * 100, 2)) + "%"
        result_df.loc[category] = [precision, pre_true_count, pred_count, recall, pre_true_count, true_count]
    return result_df


if __name__ == '__main__':
    logger.add("log/predict.log")

    # Load tokenizer and the fine-tuned BigBird checkpoint (mapped to CPU,
    # then moved to the selected device).
    tokenizer = BertTokenizer.from_pretrained(conf.model_name_or_path)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    bigbird_model = BigBird(n_model=conf.n_model, cls_nu=len(conf.cls))
    bigbird_model.load_state_dict(torch.load('bigbird_checkpoint/grid_new/2-weight4-4096/5_0.6842948717948718.pt', map_location=torch.device('cpu')))

    bigbird_model.to(device)
    bigbird_model.eval()

    # Read the data to be processed.
    df = pd.read_excel(conf.test_path, engine='openpyxl', dtype='object')
    logger.info(len(df))
    # Binary label mapping (used by change_label); assigned at module scope.
    cls = {'6-内部通话-绿色': 'no', '6-内部通话-红色': 'yes', '1-红色': 'yes', '2-橙色': 'yes', '3-黄色': 'yes', '4-蓝色': 'yes', '5-绿色': 'no', '7-看不懂': 'no'}
    test_df = df

    # Fill the input queue with (row_index, transcript) pairs.
    data_que = Queue()
    rst_que = Queue()
    for i_data in range(test_df.shape[0]):
        # for i_data in range(5):
        data_que.put((i_data, test_df["context"].values[i_data]))

    # Fan out to 5 worker processes, each draining the shared input queue.
    # NOTE(review): all workers share one model instance and device — confirm
    # this is safe on the target setup (fork semantics / CUDA).
    proc_list = []
    for i_proc in range(5):
        cwp_i = ComplaintWarnPredict(data_que, rst_que, bigbird_model, tokenizer, device, threshold=0.1)
        cwp_i.start_debug()
        proc_list.append(cwp_i)

    for i_proc in range(5):
        proc_list[i_proc].proc_debug.join()

    logger.info("全部进程结束")
    logger.info("开始保存结果......")
    # Drain the result queue into a list of tuples.
    rst_list = []
    while not rst_que.empty():
        rst_list.append(list(rst_que.get()))
    # Result tuple layout: (index, super_white_flag, super_black_flag, super_black_words,
    # common_black_flag, common_black_words, model_flag, score_model,
    # start/end flags and word lists) — see debug_worker_v3.
    df_rst = pd.DataFrame(
        columns=['index', "super_white_flag", "super_black_flag", "super_black_words", "common_black_flag", "common_black_words", "model_flag", "score_model",
                 "start_white_flag", "start_white_words", "end_black_flag", "end_black_words", "end_white_flag", "end_white_words"], data=rst_list)

    # Join results back onto the source rows and write one Excel file per input.
    df_merge = pd.merge(left=test_df, right=df_rst, left_on='indexs', right_on='index', how='left')

    pred_result_xlsx = os.path.basename(conf.test_path).replace(".xlsx", "_pred_result.xlsx")

    if not os.path.exists('result'):
        os.mkdir('result')

    df_merge.to_excel(os.path.join('result', pred_result_xlsx), index=False)
