import pandas as pd
import torch


from transformers import BertTokenizer, BigBirdModel, BertForSequenceClassification,AutoTokenizer

from torch import nn
# from torch.nn import CrossEntropyLoss
# from transformers import AdamW, get_linear_schedule_with_warmup
from model.longformer_model import LongForm
import model.config as conf
from torch.utils.data import DataLoader
from model.bigbird import BigBird
import time
from sklearn.metrics import classification_report
import re

# Explicit-risk keyword lists used to pre-screen call transcripts.
# Fix: blue_keyword was missing a comma after '老出问题', so Python's implicit
# string concatenation fused it with '经常出问题' into one unmatched phrase.
foul_language = ['你妈嘞比', '妈嘞比', '妈嘞逼', '妈了个逼', '妈那个逼', '妈那逼', '妈那个臭逼', '恁妈那逼', '恁妈了个', '麻痹', '靠你妈', '日你妈', '他妈逼',
                 '操你妈', '妈了逼', '妈逼', '草你妈', '草泥马', '我靠你爸', 'SB', '傻逼', '傻比', '去你妈', '日你姐', '骂谁', '去你娘逼', '去球']  # profanity word list
red_keyword = ['投诉', '举报', '九五九八', '九五五九八', '95598', '9598', '一二三四五', '幺二三四五', '12345',
               '一二三九八', '12398', '态度', '服务差', '打架', '权利', '权力', '诈骗', '九九八',
               '现场已报警', '缺德', '断子绝孙']  # red-alert sensitive words
orange_keyword = ['省里打', '骚扰', '往上面反应', '往上面反映', '往上级反映', '往上级反应', '给上级反映', '给领导说说', '看领导咋说', '曝光', '抖音',
                  '西瓜视频', '微博', '放网上', '快手', '记者', '火山小视频', '咋解决', '算账', '解决不了', '管不住', '着火', '用电性质', '偷电']  # orange-alert sensitive words
yellow_keyword = ['欠费停电', '一直停电', '多次停电', '电压低', '死人了', '电死人', '赔偿', '损失', '停电三四次',
                  '停电好几回', '老停电', '总停电', '老是停电', '总是停电', '经常停电', '停电四五次',
                  '低电压', '拉不起', '带不动', '不叫修', '不维修', '装孬', '没法儿管', '没法管']  # yellow-alert sensitive words (duplicate '电压低' removed)
blue_keyword = ['电住人', '电着人', '电到人', '电住小孩', '频繁', '好几次', '好几回', '再跳', '老跳', '老出问题',
                '经常出问题', '点住人', '垫住人', '点住小孩', '垫住小孩']  # blue-alert sensitive words (missing comma fixed)

def whether_include_keyword(x, keyword, client_flag=False, reverse=('回访',)):
    """
    Decide from explicit-risk keywords whether a transcript may trigger a complaint.

    :param x: call transcript, one utterance per line ('\n'-separated); customer lines
        start with "客户".  NaN-like values are tolerated.
    :param keyword: list of sensitive keywords to search for
    :param client_flag: when True, only customer utterances (lines starting with "客户")
        are scanned, and the whole transcript is cleared as soon as any line contains
        '回访' (a routine follow-up call, not a complaint)
    :param reverse: veto words — a sentence/transcript containing any of them is skipped
        (default changed from a mutable list to a tuple; contents unchanged)
    :return: (result, reasons) — result is 1 if any keyword matched, else 0;
        reasons is the deduplicated list of matched keywords
    """
    # pandas NaN becomes the literal string 'nan' under str().
    if str(x) == 'nan':
        return 0, []
    reason = []
    sentence_list = x.split('\n')
    if client_flag:
        for sentence in sentence_list:
            # Follow-up ("回访") call: discard everything found so far and stop.
            if '回访' in sentence:
                reason = []
                break
            if sentence[:2] == "客户":  # only scan customer utterances
                for word in keyword:
                    if word in sentence and not any(i in sentence for i in reverse):
                        reason.append(word)
    else:
        # Scan the whole transcript as one string; a veto word anywhere disables all matches.
        sentences = '。'.join(sentence_list)
        for word in keyword:
            if word in sentences and not any(i in sentences for i in reverse):
                reason.append(word)
    result = 1 if reason else 0
    return result, list(set(reason))

def change_label(data_df):
    """Map a row's raw ``label`` to the coarse 'yes'/'no' label.

    NOTE(review): reads the module-global ``cls`` dict that is only assigned
    inside the ``__main__`` section, so this function works only when the
    script is run directly — confirm before importing it elsewhere.
    """
    return cls[data_df.label]

def dict_label(data_df):
    """Return the class id for a row's ``label_new`` by looking it up in ``conf.cls``."""
    coarse = data_df.label_new
    return conf.cls[coarse]

def by_relu(x):
    """Rule-based arbitration applied when the model score is high.

    Checks the opening of the call against internal/courtesy phrases (clears it),
    then the tail against a blacklist (flags it) and a whitelist (clears it).
    Returns (flag, matched_words): 0 = cleared, 1 = still flagged.
    """
    begin = ['指挥中心', '移动', '联通', '电信', '供服', '指挥员', '主动抢修', '吃饭没', '耽误', '打扰', '弄啥嘞']
    white = ['行', '好', '中', '再见', '谢谢', "谢啦", "谢啊", "谢了", "谢你", '明白了', '拜拜', '辛苦', '好嘞', '那就这', '就这啊', '处理一下', '我知道', '知道了', '哈哈', '没事', '挂了啊', '那你忙', '你忙吧', '不好意思', '麻烦了', '麻烦你了', '麻烦你啊', '打扰了', '打扰你了', '打错了', '感谢', '可以']
    black = ['就这吧', '赶紧', '赶快', '不满意']
    reverse = ['不好', '好不', '行不', '不行', '中不', '不中', '可以不', '不可以', '满意不', '辛苦不', '不辛苦', '打扰不', '不打扰']
    lines = str(x).split("\n")
    if len(lines) >= 10:
        # Long call: inspect the first 5 lines and the last 10 lines separately.
        head = '\n'.join(lines[:5])
        tail = '\n'.join(lines[-10:])
    else:
        head = tail = x
    hit, words = whether_include_keyword(head, begin, client_flag=True, reverse=['打扰不', '不打扰'])
    if hit:
        return 0, words
    hit, words = whether_include_keyword(tail, black, client_flag=True, reverse=['满意不'])
    if hit:
        return 1, words
    hit, words = whether_include_keyword(tail, white, client_flag=True, reverse=reverse)
    if hit:
        return 0, words
    return 1, []

def by_ai(x):
    """Regex-based white/black-list screening of a transcript.

    The first whitelist (thanks/goodbye phrases) is applied to the last 10
    lines only; the second whitelist ("won't complain" phrases) and the
    blacklist scan the whole transcript.  The blacklist fires only when at
    least two distinct blacklist phrases occur.

    Returns (white_flag, black_flag, matches) where matches is
    [whitelist hits in the tail, whitelist-2 hits anywhere, distinct blacklist hits].
    """
    white = ["感谢", "谢谢", "谢啦", "谢啊", "谢了", "谢你", "拜拜", "辛苦了", '麻烦了', '麻烦你了']
    white2 = ['不打算投诉', '不打95598', '没打95598', '不想投诉', '不去投诉', "没有投诉", "没投诉", "没举报",
              "没有举报", "不会投诉", "不会举报"]
    black = ["没啥意义", "没意义", "别说了", "权利", "管不住", "管不着", "管不了"]
    lines = str(x).split("\n")
    tail = '.'.join(lines[-10:]) if len(lines) >= 10 else x
    white_re = re.compile('|'.join(white))
    white2_re = re.compile('|'.join(white2))
    black_re = re.compile('|'.join(black))
    tail_hits = white_re.findall(tail)
    anywhere_hits = white2_re.findall(x)
    black_hits = set(black_re.findall(x))
    white_flag = bool(tail_hits) or bool(anywhere_hits)
    black_flag = len(black_hits) >= 2
    return white_flag, black_flag, [tail_hits, anywhere_hits, list(black_hits)]


def evaluate(row, tokenizer, model, device, type="v3"):
    """
    Score one transcript row with the BigBird classifier plus keyword rules.

    :param row: DataFrame row exposing ``indexs`` (row id), ``context`` (transcript,
        one utterance per line) and ``label_new`` (coarse label, a key of ``conf.cls``)
    :param tokenizer: BERT tokenizer matching the trained checkpoint
    :param model: classifier producing a single logit (binary head)
    :param device: torch device the model lives on
    :param type: pipeline variant — "v1": raw model decision only; "v2": keyword
        gate + regex white/black lists; anything else ("v3"): full cascade
    :return: (predicted label 0/1, sigmoid score, rule-match details, matched blacklist keywords)
    """
    print("当前行：", row.indexs)
    content = str(row.context)
    # Too few lines: bail out immediately (not enough signal to classify).
    if len(content.split('\n')) < 5:
        return 0, 0, ["行太少"], []
    # Blacklist keyword screening over all sensitive-word lists first.
    result, keywords = whether_include_keyword(content, foul_language + red_keyword + orange_keyword + yellow_keyword + blue_keyword)
    # Drop the 3-character speaker prefix (e.g. "客户：") from every line, join with '。'.
    content = '。'.join([context[3:] for context in content.split('\n')])
    # Ground-truth id; computed here but only the prediction is returned.
    label = int(conf.cls[row.label_new])

    token = tokenizer.tokenize(content)
    # Replace sentence separators with [SEP] so utterance boundaries are explicit tokens.
    token = ['[SEP]' if t == '。' else t for t in token]
    if len(token) > conf.max_length - 2:
        token = token[:conf.max_length - 2]
    token = ['[CLS]'] + token + ['[SEP]']
    # NOTE(review): 'PAD' is not the canonical '[PAD]' token, so padded slots map to
    # [UNK] ids; likely harmless because the attention mask zeroes those positions,
    # but confirm this matches how the checkpoint was trained.
    token_pad = token + ['PAD'] * (conf.max_length - len(token))
    token_id = tokenizer.convert_tokens_to_ids(token_pad)
    atten_mask = [1] * len(token) + [0] * (conf.max_length - len(token))
    token_id, atten_mask, label = torch.tensor(token_id, dtype=torch.long), torch.tensor(atten_mask, dtype=torch.long), \
                                  torch.tensor(label, dtype=torch.long)

    with torch.no_grad():
        input = token_id.to(device).unsqueeze(dim=0)
        atten_mask = atten_mask.to(device).unsqueeze(dim=0)
        pred = model(input, atten_mask)
        # return pred.argmax(1).item()

        # Binary classification with a BCE-style single-logit head.
        pred = torch.sigmoid(pred)  # convert the logit to a probability
        # pred = torch.round(pred)  # 使用阈值进行分类，转换为二分类标签
        # NOTE(review): deliberately low threshold — presumably to favour recall; confirm.
        threshold = 0.1
        # ("blake_flag" is a typo for "black_flag"; name kept as-is.)
        pred1 = (pred >= threshold).int()
        white_flag, blake_flag, matchs = 0, 0, []
        if type == "v1":
            # v1: raw thresholded model decision only.
            return pred1.item(), pred.item(), matchs, keywords
        elif type == "v2":
            # result = 1
            if result:
                # Keyword hit: let the regex white/black lists arbitrate
                # (commas stripped so they don't break phrase matching).
                white_flag, blake_flag, matchs = by_ai(str(row.context).replace(",", "").replace("，", ""))
                if blake_flag:
                    return 1, pred.item(), matchs, keywords
                if white_flag:
                    return 0, pred.item(), matchs, keywords
                return pred1.item(), pred.item(), matchs, keywords
            else:
                # No blacklist keywords at all: treat as no complaint.
                return 0, pred.item(), matchs, keywords
        else:
            # v3: if blacklist keywords exist, apply the model + white/black-list rules.
            if result:
                # Above threshold: run the whitelist/blacklist arbitration.
                if pred1.item():
                    white_flag, blake_flag, matchs = by_ai(str(row.context).replace(",", "").replace("，", ""))
                    if blake_flag:
                        return 1, pred.item(), matchs, keywords
                    if white_flag:
                        return 0, pred.item(), matchs, keywords
                else:
                    return 0, pred.item(), matchs, keywords
            # No blacklist keywords: apply the alternative rule set.
            else:
                # A score below 0.5 is always "no complaint".
                if pred.item() < 0.5:
                    return 0, pred.item(), matchs, keywords
                else:
                    # Score >= 0.5: apply the rule-based filter.
                    res, matchs = by_relu(str(row.context).replace(",", "").replace("，", ""))
                    if res:
                        return 1, pred.item(), matchs, keywords
                    else:
                        return 0, pred.item(), matchs, keywords
        # t2 = time.time() * 1000
        # print("毫秒：", t2 - t1)
        # Fall-through (v3, keyword hit, above threshold, neither flag fired):
        # fall back to the thresholded model decision.
        return pred1.item(), pred.item(), matchs, keywords


def metrics_report(df):
    """
    Build a per-class precision/recall table from ground truth vs predictions.

    :param df: DataFrame with columns 'gt' (true class id) and 'predict'
        (predicted class id), both drawn from the values of ``conf.cls``
    :return: DataFrame indexed by class name, with percentage strings and raw counts
    """
    # Ground-truth and predicted label ids.
    y_true = df['gt']
    y_pred = df['predict']

    # NOTE(review): computed but unused — kept for debugging parity; the manual
    # counts below are what actually populate the table.
    report = classification_report(y_true, y_pred, target_names=conf.cls.keys(), output_dict=True, zero_division=0)

    # Result table (columns: precision %, true-positive count, predicted count,
    # recall %, true-positive count, ground-truth count).
    result_df = pd.DataFrame(
        columns=['精确率-百分比', '精确率-真实数量', '精确率-预测数量', '召回率-百分比', '召回率-预测数量',
                 '召回率-真实数量'])

    for category in conf.cls.keys():
        cls_id = conf.cls[category]
        true_count = df[df['gt'] == cls_id].shape[0]
        pred_count = df[df['predict'] == cls_id].shape[0]
        pre_true_count = df[(df['predict'] == cls_id) & (df['gt'] == cls_id)].shape[0]
        # Guard zero denominators explicitly and always emit a '%' string —
        # the original mixed int 0 with percentage strings in the same column.
        precision = f"{round(pre_true_count / pred_count * 100, 2)}%" if pred_count else "0%"
        recall = f"{round(pre_true_count / true_count * 100, 2)}%" if true_count else "0%"
        result_df.loc[category] = [precision, pre_true_count, pred_count, recall, pre_true_count, true_count]
    return result_df


if __name__ == '__main__':
    # ---- Model / tokenizer setup ----
    tokenizer = BertTokenizer.from_pretrained(conf.model_name_or_path)
    # device = 'cpu'
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # model = Bert_Attention()
    # lf_model = LongForm(n_model=conf.n_model, cls_nu=len(conf.cls))
    bigbird_model = BigBird(n_model=conf.n_model, cls_nu=len(conf.cls))
    # NOTE(review): checkpoint path is hard-coded; consider moving it into model/config.
    bigbird_model.load_state_dict(torch.load('bigbird_checkpoint/grid_new/2-weight4-4096/5_0.6842948717948718.pt'))
    # bigbird_model.load_state_dict(torch.load('public_checkpoint/5_0.6122448979591837_01.pt'))
    # bigbird_model = torch.load('bigbird_checkpoint/7_0.5452488687782805.pt')
    # bigbird_model = torch.load('lf_checkpoint/5_0.4438202247191011.pt')
    bigbird_model.to(device)
    bigbird_model.eval()
    # ---- Test data ----
    df = pd.read_excel(conf.test_path, engine='openpyxl', dtype='object')
    print(len(df))
    # Raw-label -> coarse 'yes'/'no' mapping; deliberately module-level because
    # change_label() reads this global.
    cls = {'6-内部通话-绿色': 'no', '6-内部通话-红色': 'yes', '1-红色': 'yes', '2-橙色': 'yes', '3-黄色': 'yes', '4-蓝色': 'yes', '5-绿色': 'no', '7-看不懂': 'no'}
    # cls = {'6-内部通话': 'inner', '1-红色': 'yes', '2-橙色': 'yes', '3-黄色': 'yes', '4-蓝色': 'yes', '5-绿色': 'no'}
    # test_df = pd.DataFrame()
    # for class_label, value in cls.items():
    #     class_data = df[(df['label'] == class_label)]
    #     test_df = pd.concat([test_df, class_data])
    # print(len(test_df))
    test_df = df
    # Derive the coarse label and numeric ground truth, then run the "v2"
    # evaluation pipeline row by row (predict/score/rule matches/blacklist hits).
    test_df['label_new'] = test_df.apply(lambda x: change_label(x), axis=1)
    test_df['gt'] = test_df.apply(lambda x: dict_label(x), axis=1)
    test_df[['predict', 'score', 'keyword', '黑名单']] = test_df.apply(lambda x: pd.Series(evaluate(x, tokenizer, bigbird_model, device, "v2")), axis=1, result_type='expand')
    # result_df = metrics_report(test_df)
    # test_df.to_excel('result/1012/1007_4096_2_均衡_1715.xlsx')
    # Write the annotated predictions to an Excel workbook.
    with pd.ExcelWriter('result/许昌历史通话文本合并通话记录信息_20230801-20231222_pred_v2.xlsx') as writer:
        test_df.to_excel(writer, sheet_name='原始数据')
        # result_df.to_excel(writer, sheet_name='统计结果')
    # # 模型预测
    # test_data = torch.randn(2, 3)  # 测试数据，形状为(2, 3)
    # with torch.no_grad():
    #     test_outputs = model(test_data)  # 模型预测输出
    #     test_predictions = torch.sigmoid(test_outputs)  # 使用sigmoid函数转换为概率值
    #     test_predictions = torch.round(test_predictions)  # 使用阈值进行分类，转换为二分类标签
