# _*_ coding : utf-8 _*_
# @Time : 2023/10/9 10:46
# @Author : momo
# @File : data_helper
# @Project : bert-textcnn
import re

import numpy as np
from tqdm import tqdm
import time

from data_helper import DataHelper
import data_helper
import config
import pickle
from keras_bert import get_custom_objects
from tensorflow.keras.models import load_model
from keras_bert import load_vocabulary, Tokenizer
import pandas as pd

from _Logger import Logger
from ha_bert_textcnn import focal_loss_with_rdrop

# Load the BERT vocabulary and build the tokenizer from it.
token_dict = load_vocabulary(config.bert_dict_path)
tokenizer = Tokenizer(token_dict)

# Register the custom loss function in custom_objects so that
# load_model can deserialize models that were compiled with it.
custom_objects = get_custom_objects()
# custom_objects["custom_loss1"]=bert_textcnn_model.custom_loss1
# custom_objects["custom_loss2"]=bert_textcnn_model.custom_loss2
# custom_objects["custom_loss3"]=bert_textcnn_model.custom_loss3
# custom_objects["get_labels_weight"]=bert_textcnn_model.get_labels_weight
custom_objects["focal_loss"]=focal_loss_with_rdrop

# Load the trained models (the bare BERT encoder and the full classifier).
bert = load_model(config.bert_model_path,custom_objects=custom_objects)
model = load_model(config.model_path, custom_objects=custom_objects)

# Positional (iloc) column indices into the input DataFrame.
jy_scope=1  # business-scope text column
label1=5  # level-1 label output column
label2=6  # level-2 label output column
label3=7  # level-3 label output column

tio=3  # patent text column

# Three-level prediction: one classifier per hierarchy level (mlb1/2/3)
# produces that level's label set.
#
# df : test_demo — test file (~4000 rows)
# fn : node — used to map predicted label names back to node codes
# Predict labels for every row in the file.
def predict(df):
    """Predict three-level labels for every row of *df* in place.

    For each row, builds the input text from the patent column (with the
    boilerplate prefix "一种" stripped) or, when the patent cell is NaN,
    from the business-scope column; washes it; predicts the three label
    lists; and writes them back as ";"-joined strings into the label
    columns.

    :param df: DataFrame whose positional columns match the module-level
               indices jy_scope / tio / label1 / label2 / label3.
    :return: the labelled rows restricted to the report columns.
    """
    # Map "节点名称" (node name) -> "节点编码" (node code) so predicted
    # label names can be converted back to their codes.
    rematch_dict = DataHelper.make_node_dict("节点名称", "节点编码")
    for i in tqdm(range(df.shape[0])):
        # Hoist the patent cell: it is both the NaN test and the text source.
        patent = str(df.iloc[i, tio])
        if patent != "nan":
            # text = str(df.iloc[i, jy_scope]) + "还有专利:" + patent.replace("一种","")
            text = re.sub("一种", "", patent)
        else:
            text = str(df.iloc[i, jy_scope])
        # Wash the text before predicting.
        text = DataHelper.wash_text(text)
        # Predict the single text.
        l1, l2, l3 = predict_one(text, rematch_dict)
        # Serialize each non-empty label list back to the expected format.
        if l1:
            df.iloc[i, label1] = ";".join(l1)
        if l2:
            df.iloc[i, label2] = ";".join(l2)
        if l3:
            df.iloc[i, label3] = ";".join(l3)

    return df[["唯一id", "经营范围", "专利数", '标签1级', '标签2级', '标签3级']]

# 预测单个句子的标签
def predict_one(text, rematch_dict):
    """Predict the labels of a single sentence at all three hierarchy levels.

    :param text: washed input text.
    :param rematch_dict: mapping from a label's trailing name segment
                         (after the last "-") to its node code.
    :return: tuple of three lists of node codes (level 1, 2, 3).
    """
    # Encode the sentence for BERT.
    token_id, segment_id = tokenizer.encode(first=text, max_len=config.max_len)
    predictions = model.predict([np.array([token_id]), np.array([segment_id])])

    # Per-level acceptance thresholds, tuned on the label distribution:
    # level 1: single/multi-label ratio ~6:1; level 2: ~2:1; level 3: ~3:5.
    thresholds = (0.3438, 0.30, 0.20)

    pre_label_list = []
    # One model output head per hierarchy level.
    for i, ar in enumerate(thresholds):
        # Binarizer holding this level's label classes: ./data/mlb{i}.pkl
        # (use a context manager so the file handle is always closed).
        with open(config.mlb_dir + str(i) + ".pkl", 'rb') as fh:
            mlb = pickle.load(fh)
        p = predictions[i][0]

        if i == 0:
            # Level 1 always has at least one label, so fall back to the
            # argmax when nothing clears the threshold.
            indexes = choose_indexes(p, ar)
        else:
            indexes = [j for j in range(len(p)) if p[j] > ar]

        classes = mlb.classes_.tolist()
        labels = [classes[j] for j in indexes]
        # Map the trailing name segment of each label back to its node code.
        pre_label_list.append([rematch_dict[l[l.rfind("-") + 1:]]
                               for l in labels])

    return pre_label_list[0], pre_label_list[1], pre_label_list[2]
# # 预测单个句子的标签
# def predict_noe(text, rematch_dict):
#     l1_list, l2_list, l3_list = [], [], []
#     # 对句子编码
#     token_id, segment_id = tokenizer.encode(first=text, max_len=config.max_len)
#     prediction = model.predict([[token_id], [segment_id]])[0]
#     # 一些监控标签的输出
#     # print("[PRED] predict...")
#     # print(text,"\n",mlb.classes_.tolist(),"\n",prediction)
#     # # print("[OUT] p of every label array:")
#     # print(prediction)
#     # 选择概率合适的标签下标
#     ar = config.accept_rate
#     indexes = [i for i in range(len(prediction)) if prediction[i] > ar]
#     # 取出indexes对应的labels
#     labels = [mlb.classes_.tolist()[i] for i in indexes]
#     # 总标签
#     # print(mlb.classes_.tolist())
#
#     # 正则：判断有几个- 表示它是第几级的标签
#     # p0=r'[^-]*'
#     # 二级
#     p2 = r'[^-]*-[^-]*'
#     # 三级
#     p3 = r'[^-]*-[^-]*-[^-]*'
#     for l in labels:
#         l = str(l)
#         if re.match(p3, l):
#             l3_list.append(rematch_dict[l[l.rfind("-") + 1:]])
#         elif re.match(p2, l):
#             l2_list.append(rematch_dict[l[l.rfind("-") + 1:]])
#         else:
#             l1_list.append(rematch_dict[l])
#
#     return l1_list, l2_list, l3_list

def choose_indexes(p, ar):
    """Return the indices of probabilities in *p* strictly above *ar*.

    If no probability exceeds the threshold, fall back to the single
    index with the highest probability (ties broken by first occurrence),
    because at least one label must always be chosen.

    :param p: sequence of per-class probabilities (non-empty).
    :param ar: acceptance-rate threshold.
    :return: list of selected indices (never empty for non-empty p).
    """
    indexes = [j for j in range(len(p)) if p[j] > ar]
    if not indexes:
        # Nothing cleared the threshold: pick the most probable label.
        indexes = [max(range(len(p)), key=lambda j: p[j])]
    return indexes
if __name__ == "__main__":

    # Logger.build("predict")
    dh = data_helper.DataHelper()

    # Merge the demo, patent and node source files into one DataFrame.
    xy = dh.read_and_merge_content_excl(config.ori_demo_path, config.ori_zl_path, config.ori_node_path)
    # Predict on the first 4000 rows only (the test portion).
    res_df = predict(xy[:4000])

    demo_df = pd.read_excel(config.ori_demo_path)

    # Append the untouched last 1000 original rows to the predicted rows,
    # then write the result to a timestamped Excel file.
    demo_df=pd.concat([res_df,demo_df[-1000:]],ignore_index=True)
    demo_df.to_excel(config.res_dir + "res_" + time.strftime("%Y%m%d-%H%M%S", time.localtime()) + ".xlsx", index=False)
